/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2016 Microsemi Corporation
 *    Copyright 2014-2015 PMC-Sierra, Inc.
 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/*
 * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
 * with an optional trailing '-' followed by a byte value (0-255).
 */
#define HPSA_DRIVER_VERSION "3.4.14-0"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
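
/*
 * Example usage of the module parameters above (illustrative; assumes the
 * driver is built as the hpsa.ko module):
 *
 *	modprobe hpsa hpsa_simple_mode=1 hpsa_allow_any=1
 *
 * Both parameters are declared S_IRUGO|S_IWUSR, so they can also be
 * changed at runtime through /sys/module/hpsa/parameters/.
 */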

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x05809005, "SmartHBA-SA", &SA5_access},
	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
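
/*
 * Worked example of the board_id encoding used in products[] (derived
 * from the two tables above): board_id packs the PCI subsystem device ID
 * into the high 16 bits and the subsystem vendor ID into the low 16 bits,
 * so subsystem 0x103C:0x3241 from hpsa_pci_device_id[] (Smart Array P212)
 * is looked up as the products[] entry 0x3241103C.
 */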

static struct scsi_transport_template *hpsa_sas_transport_template;
static int hpsa_add_sas_host(struct ctlr_info *h);
static void hpsa_delete_sas_host(struct ctlr_info *h);
static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
			struct hpsa_scsi_dev_t *device);
static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
		struct sas_rphy *rphy);

#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)
#define HPSA_SIMPLE_ERROR_BITS 0x03

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);
static void hpsa_disable_rld_caching(struct ctlr_info *h);
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
	struct ReportExtendedLUNdata *buf, int bufsize);
static int hpsa_luns_changed(struct ctlr_info *h);
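
/*
 * The two helpers below recover the driver's ctlr_info from the private
 * area of the Scsi_Host.  A minimal sketch of the producer side, assuming
 * the host is allocated elsewhere in the driver roughly like this
 * (illustrative, not the verbatim probe code):
 *
 *	struct Scsi_Host *sh;
 *	unsigned long *priv;
 *
 *	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(unsigned long));
 *	priv = shost_priv(sh);
 *	*priv = (unsigned long)h;
 *
 * sdev_to_hba() and shost_to_hba() then simply read back that stashed
 * pointer.
 */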
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
	return c->scsi_cmd == SCSI_CMD_IDLE;
}

static inline bool hpsa_is_pending_event(struct CommandList *c)
{
	return c->abort_pending || c->reset_pending;
}

/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
			u8 *sense_key, u8 *asc, u8 *ascq)
{
	struct scsi_sense_hdr sshdr;
	bool rc;

	*sense_key = -1;
	*asc = -1;
	*ascq = -1;

	if (sense_data_len < 1)
		return;

	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
	if (rc) {
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
	}
}

static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	u8 sense_key, asc, ascq;
	int sense_len;

	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;

	decode_sense_data(c->err_info->SenseInfo, sense_len,
				&sense_key, &asc, &ascq);
	if (sense_key != UNIT_ATTENTION || asc == 0xff)
		return 0;

	switch (asc) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: a state change detected, command retried\n",
			h->devname);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			"%s: LUN failure detected\n", h->devname);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: report LUN data changed\n", h->devname);
/*
 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
 * target (array) devices.
 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev,
			"%s: a power on or device reset detected\n",
			h->devname);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev,
			"%s: unit attention cleared by another initiator\n",
			h->devname);
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: unknown unit attention detected\n",
			h->devname);
		break;
	}
	return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}

static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ld;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	ld = lockup_detected(h);

	return sprintf(buf, "ld=%d\n", ld);
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}

static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}
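
/*
 * The two store hooks above are reachable through sysfs once the shost
 * attributes are registered (see hpsa_shost_attrs[] below), e.g.
 * (illustrative; N is the SCSI host number assigned at probe time):
 *
 *	echo 1 > /sys/class/scsi_host/hostN/hp_ssd_smart_path_status
 *	echo 2 > /sys/class/scsi_host/hostN/raid_offload_debug
 */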

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ? "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static u32 needs_abort_tags_swizzled[] = {
	0x323D103C, /* Smart Array P700m */
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* SmartArray P711m */
};

static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
	int i;

	for (i = 0; i < nelems; i++)
		if (a[i] == board_id)
			return 1;
	return 0;
}

static int ctlr_is_hard_resettable(u32 board_id)
{
	return !board_id_in_array(unresettable_controller,
			ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	return !board_id_in_array(soft_unresettable_controller,
			ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static int ctlr_needs_abort_tags_swizzled(u32 board_id)
{
	return board_id_in_array(needs_abort_tags_swizzled,
			ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN", "PHYS DRV"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)

static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
{
	return !device->physical_device;
}

static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_device(hdev)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}

#define MAX_PATHS 8
static ssize_t path_info_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int i;
	int output_len = 0;
	u8 box;
	u8 bay;
	u8 path_map_index = 0;
	char *active;
	unsigned char phys_connector[2];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->devlock, flags);
		return -ENODEV;
	}

	bay = hdev->bay;
	for (i = 0; i < MAX_PATHS; i++) {
		path_map_index = 1<<i;
		if (i == hdev->active_path_index)
			active = "Active";
		else if (hdev->path_map & path_map_index)
			active = "Inactive";
		else
			continue;

		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"[%d:%d:%d:%d] %20.20s ",
				h->scsi_host->host_no,
				hdev->bus, hdev->target, hdev->lun,
				scsi_device_type(hdev->devtype));

		if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
			output_len += scnprintf(buf + output_len,
						PAGE_SIZE - output_len,
						"%s\n", active);
			continue;
		}

		box = hdev->box[i];
		memcpy(&phys_connector, &hdev->phys_connector[i],
			sizeof(phys_connector));
		if (phys_connector[0] < '0')
			phys_connector[0] = '0';
		if (phys_connector[1] < '0')
			phys_connector[1] = '0';
		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"PORT: %.2s ",
				phys_connector);
		if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
				hdev->expose_device) {
			if (box == 0 || box == 0xFF) {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BAY: %hhu %s\n",
					bay, active);
			} else {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BOX: %hhu BAY: %hhu %s\n",
					box, bay, active);
			}
		} else if (box != 0 && box != 0xFF) {
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "BOX: %hhu %s\n",
				box, active);
		} else
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "%s\n", active);
	}

	spin_unlock_irqrestore(&h->devlock, flags);
	return output_len;
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
			host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
	host_show_lockup_detected, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	&dev_attr_path_info,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	&dev_attr_lockup_detected,
	NULL,
};

#define HPSA_NRESERVED_CMDS	(HPSA_CMDS_RESERVED_FOR_ABORTS + \
		HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_configure	= hpsa_slave_configure,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs		= hpsa_sdev_attrs,
	.shost_attrs		= hpsa_shost_attrs,
	.max_sectors		= 8192,
	.no_write_same		= 1,
};
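
/*
 * next_command() below consumes the performant-mode reply ring for
 * queue q.  The controller posts completed command tags into rq->head[];
 * bit 0 of each posted tag is a phase bit, and an entry is valid only
 * while that bit matches rq->wraparound, which the driver toggles each
 * time current_entry wraps past max_commands.  That is how fresh
 * completions are distinguished from stale ring contents without the
 * controller ever having to clear old entries.
 */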
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */

/*
 * set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
					int reply_queue)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (unlikely(!h->msix_vector))
			return;
		if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
		else
			c->Header.ReplyQueue = reply_queue % h->nreply_queues;
	}
}
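
/*
 * Worked example of the tag encoding done in set_performant_mode(),
 * with illustrative values: if c->Header.SGList is 5 and
 * h->blockFetchTable[5] is 2, then c->busaddr is ORed with
 * 1 | (2 << 1) = 0x05, i.e. bit 0 set (performant mode), block fetch
 * table entry 2 in bits 1-3, and command type 0 in bits 4-6 (a normal
 * mode command).
 */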

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	else
		cp->ReplyQueue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
		&h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= h->ioaccel2_blockFetchTable[0];
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c, int reply_queue)
{
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	case IOACCEL2_TMF:
		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	default:
		set_performant_mode(h, c, reply_queue);
		h->access.submit_command(h, c);
	}
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
	if (unlikely(hpsa_is_pending_event(c)))
		return finish_cmd(c);

	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}

static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, char *description)
{
#define LABEL_SIZE 25
	char label[LABEL_SIZE];

	if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
		return;

	switch (dev->devtype) {
	case TYPE_RAID:
		snprintf(label, LABEL_SIZE, "controller");
		break;
	case TYPE_ENCLOSURE:
		snprintf(label, LABEL_SIZE, "enclosure");
		break;
	case TYPE_DISK:
	case TYPE_ZBC:
		if (dev->external)
			snprintf(label, LABEL_SIZE, "external");
		else if (!is_logical_dev_addr_mode(dev->scsi3addr))
			snprintf(label, LABEL_SIZE, "%s",
				raid_label[PHYSICAL_DRIVE]);
		else
			snprintf(label, LABEL_SIZE, "RAID-%s",
				dev->raid_level > RAID_UNKNOWN ? "?" :
				raid_label[dev->raid_level]);
		break;
	case TYPE_ROM:
		snprintf(label, LABEL_SIZE, "rom");
		break;
	case TYPE_TAPE:
		snprintf(label, LABEL_SIZE, "tape");
		break;
	case TYPE_MEDIUM_CHANGER:
		snprintf(label, LABEL_SIZE, "changer");
		break;
	default:
		snprintf(label, LABEL_SIZE, "UNKNOWN");
		break;
	}

	dev_printk(level, &h->pdev->dev,
			"scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			description,
			scsi_device_type(dev->devtype),
			dev->vendor,
			dev->model,
			label,
			dev->offload_config ? '+' : '-',
			dev->offload_enabled ? '+' : '-',
			dev->expose_device);
}
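
/*
 * Sample of the message emitted by hpsa_show_dev_msg() above, with
 * illustrative field values (host/vendor/model and RAID level vary):
 *
 *	scsi 2:0:0:0: added Direct-Access HP LOGICAL VOLUME RAID-5 SSDSmartPathCap+ En+ Exp=1
 */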

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, except bytes 4 and 5.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	addr1[5] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		addr2[5] = 0;
		/* differ only in bytes 4 and 5? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, device,
		device->expose_device ? "added" : "masked");
	device->offload_to_be_enabled = device->offload_enabled;
	device->offload_enabled = 0;
	return 0;
}

/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	int offload_enabled;
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed.  Careful about the ordering. */
	if (new_entry->offload_config && new_entry->offload_enabled) {
		/*
		 * if drive is newly offload_enabled, we want to copy the
		 * raid map data first.  If previously offload_enabled and
		 * offload_config were set, raid map data had better be
		 * the same as it was before.  if raid map data is changed
		 * then it had better be the case that
		 * h->dev[entry]->offload_enabled is currently 0.
		 */
		h->dev[entry]->raid_map = new_entry->raid_map;
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	}
	if (new_entry->hba_ioaccel_enabled) {
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
		wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
	}
	h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->queue_depth = new_entry->queue_depth;

	/*
	 * We can turn off ioaccel offload now, but need to delay turning
	 * it on until we can update h->dev[entry]->phys_disk[], but we
	 * can't do that until all the devices are updated.
	 */
	h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
	if (!new_entry->offload_enabled)
		h->dev[entry]->offload_enabled = 0;

	offload_enabled = h->dev[entry]->offload_enabled;
	h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
	hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
	h->dev[entry]->offload_enabled = offload_enabled;
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
	new_entry->offload_to_be_enabled = new_entry->offload_enabled;
	new_entry->offload_enabled = 0;
}
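
/*
 * Note the pattern in the add/update/replace helpers above: ioaccel
 * offload is never enabled directly.  The desired state is latched in
 * offload_to_be_enabled and offload_enabled is cleared, because the
 * phys_disk[] pointers a logical drive offloads to are only rebuilt
 * later, by hpsa_update_log_drive_phys_drive_ptrs() from
 * adjust_hpsa_scsi_table(); offload_enabled is restored from
 * offload_to_be_enabled once those pointers are coherent.
 */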

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
}

#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	if (!is_logical_dev_addr_mode(dev1->scsi3addr))
		if (dev1->queue_depth != dev2->queue_depth)
			return 1;
	return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	if (needle == NULL)
		return DEVICE_NOT_FOUND;

	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}

static void hpsa_monitor_offline_device(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
		return;
	}
	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}

/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_NOT_AVAILABLE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}

/*
 * Figure the list of physical drive pointers for a logical drive with
 * raid offload configured.
 */
static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices,
				struct hpsa_scsi_dev_t *logical_drive)
{
	struct raid_map_data *map = &logical_drive->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int i, j;
	int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	int nraid_map_entries = le16_to_cpu(map->row_cnt) *
				le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int nphys_disk = le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int qdepth;

	if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
		nraid_map_entries = RAID_MAP_MAX_ENTRIES;

	logical_drive->nphysical_disks = nraid_map_entries;

	qdepth = 0;
	for (i = 0; i < nraid_map_entries; i++) {
		logical_drive->phys_disk[i] = NULL;
		if (!logical_drive->offload_config)
			continue;
		for (j = 0; j < ndevices; j++) {
			if (dev[j] == NULL)
				continue;
			if (dev[j]->devtype != TYPE_DISK &&
			    dev[j]->devtype != TYPE_ZBC)
				continue;
			if (is_logical_device(dev[j]))
				continue;
			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
				continue;

			logical_drive->phys_disk[i] = dev[j];
			if (i < nphys_disk)
				qdepth = min(h->nr_cmds, qdepth +
				    logical_drive->phys_disk[i]->queue_depth);
			break;
		}

		/*
		 * This can happen if a physical drive is removed and
		 * the logical drive is degraded.  In that case, the RAID
		 * map data will refer to a physical disk which isn't actually
		 * present.  And in that case offload_enabled should already
		 * be 0, but we'll turn it off here just in case
		 */
		 * And in that case offload_enabled should already
		 * be 0, but we'll turn it off here just in case.
		 */
		if (!logical_drive->phys_disk[i]) {
			logical_drive->offload_enabled = 0;
			logical_drive->offload_to_be_enabled = 0;
			logical_drive->queue_depth = 8;
		}
	}
	if (nraid_map_entries)
		/*
		 * This is correct for reads, too high for full stripe writes,
		 * way too high for partial stripe writes
		 */
		logical_drive->queue_depth = qdepth;
	else
		logical_drive->queue_depth = h->nr_cmds;
}

static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices)
{
	int i;

	for (i = 0; i < ndevices; i++) {
		if (dev[i] == NULL)
			continue;
		/* Same fix as above: skip only non-disk, non-ZBC devices. */
		if (dev[i]->devtype != TYPE_DISK &&
		    dev[i]->devtype != TYPE_ZBC)
			continue;
		if (!is_logical_device(dev[i]))
			continue;

		/*
		 * If offload is currently enabled, the RAID map and
		 * phys_disk[] assignment *better* not be changing
		 * and since it isn't changing, we do not need to
		 * update it.
		 */
		if (dev[i]->offload_enabled)
			continue;

		hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
	}
}

static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int rc = 0;

	if (!h->scsi_host)
		return 1;

	if (is_logical_device(device)) /* RAID */
		rc = scsi_add_device(h->scsi_host, device->bus,
			device->target, device->lun);
	else /* HBA */
		rc = hpsa_add_sas_device(h->sas_host, device);

	return rc;
}

static void hpsa_remove_device(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *device)
{
	struct scsi_device *sdev = NULL;

	if (!h->scsi_host)
		return;

	if (is_logical_device(device)) { /* RAID */
		sdev = scsi_device_lookup(h->scsi_host, device->bus,
						device->target, device->lun);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/*
			 * We don't expect to get here. Future commands
			 * to this device will get a selection timeout as
			 * if the device were gone.
			 */
			hpsa_show_dev_msg(KERN_WARNING, h, device,
					"didn't find device for removal.");
		}
	} else /* HBA */
		hpsa_remove_sas_device(device);
}

static void adjust_hpsa_scsi_table(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data. This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;

	/*
	 * A reset can cause a device status to change;
	 * re-schedule the scan to see what happened.
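	 * (Setting h->drv_req_rescan below is a sketch of the recovery
	 * path: the periodic rescan worker checks this flag and re-runs
	 * the device-table update once the reset has completed, so
	 * returning early here should not lose any device state.)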
1766 */ 1767 if (h->reset_in_progress) { 1768 h->drv_req_rescan = 1; 1769 return; 1770 } 1771 1772 added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL); 1773 removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL); 1774 1775 if (!added || !removed) { 1776 dev_warn(&h->pdev->dev, "out of memory in " 1777 "adjust_hpsa_scsi_table\n"); 1778 goto free_and_out; 1779 } 1780 1781 spin_lock_irqsave(&h->devlock, flags); 1782 1783 /* find any devices in h->dev[] that are not in 1784 * sd[] and remove them from h->dev[], and for any 1785 * devices which have changed, remove the old device 1786 * info and add the new device info. 1787 * If minor device attributes change, just update 1788 * the existing device structure. 1789 */ 1790 i = 0; 1791 nremoved = 0; 1792 nadded = 0; 1793 while (i < h->ndevices) { 1794 csd = h->dev[i]; 1795 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry); 1796 if (device_change == DEVICE_NOT_FOUND) { 1797 changes++; 1798 hpsa_scsi_remove_entry(h, i, removed, &nremoved); 1799 continue; /* remove ^^^, hence i not incremented */ 1800 } else if (device_change == DEVICE_CHANGED) { 1801 changes++; 1802 hpsa_scsi_replace_entry(h, i, sd[entry], 1803 added, &nadded, removed, &nremoved); 1804 /* Set it to NULL to prevent it from being freed 1805 * at the bottom of hpsa_update_scsi_devices() 1806 */ 1807 sd[entry] = NULL; 1808 } else if (device_change == DEVICE_UPDATED) { 1809 hpsa_scsi_update_entry(h, i, sd[entry]); 1810 } 1811 i++; 1812 } 1813 1814 /* Now, make sure every device listed in sd[] is also 1815 * listed in h->dev[], adding them if they aren't found 1816 */ 1817 1818 for (i = 0; i < nsds; i++) { 1819 if (!sd[i]) /* if already added above. */ 1820 continue; 1821 1822 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS 1823 * as the SCSI mid-layer does not handle such devices well. 1824 * It relentlessly loops sending TUR at 3Hz, then READ(10) 1825 * at 160Hz, and prevents the system from coming up. 1826 */ 1827 if (sd[i]->volume_offline) { 1828 hpsa_show_volume_status(h, sd[i]); 1829 hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline"); 1830 continue; 1831 } 1832 1833 device_change = hpsa_scsi_find_entry(sd[i], h->dev, 1834 h->ndevices, &entry); 1835 if (device_change == DEVICE_NOT_FOUND) { 1836 changes++; 1837 if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0) 1838 break; 1839 sd[i] = NULL; /* prevent from being freed later. */ 1840 } else if (device_change == DEVICE_CHANGED) { 1841 /* should never happen... */ 1842 changes++; 1843 dev_warn(&h->pdev->dev, 1844 "device unexpectedly changed.\n"); 1845 /* but if it does happen, we just ignore that device */ 1846 } 1847 } 1848 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices); 1849 1850 /* Now that h->dev[]->phys_disk[] is coherent, we can enable 1851 * any logical drives that need it enabled. 1852 */ 1853 for (i = 0; i < h->ndevices; i++) { 1854 if (h->dev[i] == NULL) 1855 continue; 1856 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled; 1857 } 1858 1859 spin_unlock_irqrestore(&h->devlock, flags); 1860 1861 /* Monitor devices which are in one of several NOT READY states to be 1862 * brought online later. This must be done without holding h->devlock, 1863 * so don't touch h->dev[] 1864 */ 1865 for (i = 0; i < nsds; i++) { 1866 if (!sd[i]) /* if already added above. 
			 */
			continue;
		if (sd[i]->volume_offline)
			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
	}

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes); scsi_scan_host will do it later the
	 * first time through.
	 */
	if (!changes)
		goto free_and_out;

	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		if (removed[i] == NULL)
			continue;
		if (removed[i]->expose_device)
			hpsa_remove_device(h, removed[i]);
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		int rc = 0;

		if (added[i] == NULL)
			continue;
		if (!(added[i]->expose_device))
			continue;
		rc = hpsa_add_device(h, added[i]);
		if (!rc)
			continue;
		dev_warn(&h->pdev->dev,
			"addition failed %d, device not added.", rc);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
		h->drv_req_rescan = 1;
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
		struct scsi_target *starget;
		struct sas_rphy *rphy;

		starget = scsi_target(sdev);
		rphy = target_to_rphy(starget);
		sd = hpsa_find_device_by_sas_rphy(h, rphy);
		if (sd) {
			sd->target = sdev_id(sdev);
			sd->lun = sdev->lun;
		}
	} else
		sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
					sdev_id(sdev), sdev->lun);

	if (sd && sd->expose_device) {
		atomic_set(&sd->ioaccel_cmds_out, 0);
		sdev->hostdata = sd;
	} else
		sdev->hostdata = NULL;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

/* configure scsi device based on internal per-device structure */
static int hpsa_slave_configure(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	int queue_depth;

	sd = sdev->hostdata;
	sdev->no_uld_attach = !sd || !sd->expose_device;

	if (sd)
		queue_depth = sd->queue_depth != 0 ?
			sd->queue_depth : sdev->host->can_queue;
	else
		queue_depth = sdev->host->can_queue;

	scsi_change_queue_depth(sdev, queue_depth);

	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do.
*/ 1987 } 1988 1989 static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h) 1990 { 1991 int i; 1992 1993 if (!h->ioaccel2_cmd_sg_list) 1994 return; 1995 for (i = 0; i < h->nr_cmds; i++) { 1996 kfree(h->ioaccel2_cmd_sg_list[i]); 1997 h->ioaccel2_cmd_sg_list[i] = NULL; 1998 } 1999 kfree(h->ioaccel2_cmd_sg_list); 2000 h->ioaccel2_cmd_sg_list = NULL; 2001 } 2002 2003 static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h) 2004 { 2005 int i; 2006 2007 if (h->chainsize <= 0) 2008 return 0; 2009 2010 h->ioaccel2_cmd_sg_list = 2011 kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds, 2012 GFP_KERNEL); 2013 if (!h->ioaccel2_cmd_sg_list) 2014 return -ENOMEM; 2015 for (i = 0; i < h->nr_cmds; i++) { 2016 h->ioaccel2_cmd_sg_list[i] = 2017 kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) * 2018 h->maxsgentries, GFP_KERNEL); 2019 if (!h->ioaccel2_cmd_sg_list[i]) 2020 goto clean; 2021 } 2022 return 0; 2023 2024 clean: 2025 hpsa_free_ioaccel2_sg_chain_blocks(h); 2026 return -ENOMEM; 2027 } 2028 2029 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h) 2030 { 2031 int i; 2032 2033 if (!h->cmd_sg_list) 2034 return; 2035 for (i = 0; i < h->nr_cmds; i++) { 2036 kfree(h->cmd_sg_list[i]); 2037 h->cmd_sg_list[i] = NULL; 2038 } 2039 kfree(h->cmd_sg_list); 2040 h->cmd_sg_list = NULL; 2041 } 2042 2043 static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h) 2044 { 2045 int i; 2046 2047 if (h->chainsize <= 0) 2048 return 0; 2049 2050 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds, 2051 GFP_KERNEL); 2052 if (!h->cmd_sg_list) { 2053 dev_err(&h->pdev->dev, "Failed to allocate SG list\n"); 2054 return -ENOMEM; 2055 } 2056 for (i = 0; i < h->nr_cmds; i++) { 2057 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) * 2058 h->chainsize, GFP_KERNEL); 2059 if (!h->cmd_sg_list[i]) { 2060 dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n"); 2061 goto clean; 2062 } 2063 } 2064 return 0; 2065 2066 clean: 2067 hpsa_free_sg_chain_blocks(h); 2068 return -ENOMEM; 2069 } 2070 2071 static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h, 2072 struct io_accel2_cmd *cp, struct CommandList *c) 2073 { 2074 struct ioaccel2_sg_element *chain_block; 2075 u64 temp64; 2076 u32 chain_size; 2077 2078 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex]; 2079 chain_size = le32_to_cpu(cp->sg[0].length); 2080 temp64 = pci_map_single(h->pdev, chain_block, chain_size, 2081 PCI_DMA_TODEVICE); 2082 if (dma_mapping_error(&h->pdev->dev, temp64)) { 2083 /* prevent subsequent unmapping */ 2084 cp->sg->address = 0; 2085 return -1; 2086 } 2087 cp->sg->address = cpu_to_le64(temp64); 2088 return 0; 2089 } 2090 2091 static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h, 2092 struct io_accel2_cmd *cp) 2093 { 2094 struct ioaccel2_sg_element *chain_sg; 2095 u64 temp64; 2096 u32 chain_size; 2097 2098 chain_sg = cp->sg; 2099 temp64 = le64_to_cpu(chain_sg->address); 2100 chain_size = le32_to_cpu(cp->sg[0].length); 2101 pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE); 2102 } 2103 2104 static int hpsa_map_sg_chain_block(struct ctlr_info *h, 2105 struct CommandList *c) 2106 { 2107 struct SGDescriptor *chain_sg, *chain_block; 2108 u64 temp64; 2109 u32 chain_len; 2110 2111 chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; 2112 chain_block = h->cmd_sg_list[c->cmdindex]; 2113 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN); 2114 chain_len = sizeof(*chain_sg) * 2115 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries); 2116 chain_sg->Len = cpu_to_le32(chain_len); 2117 temp64 = 
pci_map_single(h->pdev, chain_block, chain_len, 2118 PCI_DMA_TODEVICE); 2119 if (dma_mapping_error(&h->pdev->dev, temp64)) { 2120 /* prevent subsequent unmapping */ 2121 chain_sg->Addr = cpu_to_le64(0); 2122 return -1; 2123 } 2124 chain_sg->Addr = cpu_to_le64(temp64); 2125 return 0; 2126 } 2127 2128 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h, 2129 struct CommandList *c) 2130 { 2131 struct SGDescriptor *chain_sg; 2132 2133 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries) 2134 return; 2135 2136 chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; 2137 pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr), 2138 le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE); 2139 } 2140 2141 2142 /* Decode the various types of errors on ioaccel2 path. 2143 * Return 1 for any error that should generate a RAID path retry. 2144 * Return 0 for errors that don't require a RAID path retry. 2145 */ 2146 static int handle_ioaccel_mode2_error(struct ctlr_info *h, 2147 struct CommandList *c, 2148 struct scsi_cmnd *cmd, 2149 struct io_accel2_cmd *c2) 2150 { 2151 int data_len; 2152 int retry = 0; 2153 u32 ioaccel2_resid = 0; 2154 2155 switch (c2->error_data.serv_response) { 2156 case IOACCEL2_SERV_RESPONSE_COMPLETE: 2157 switch (c2->error_data.status) { 2158 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: 2159 break; 2160 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: 2161 cmd->result |= SAM_STAT_CHECK_CONDITION; 2162 if (c2->error_data.data_present != 2163 IOACCEL2_SENSE_DATA_PRESENT) { 2164 memset(cmd->sense_buffer, 0, 2165 SCSI_SENSE_BUFFERSIZE); 2166 break; 2167 } 2168 /* copy the sense data */ 2169 data_len = c2->error_data.sense_data_len; 2170 if (data_len > SCSI_SENSE_BUFFERSIZE) 2171 data_len = SCSI_SENSE_BUFFERSIZE; 2172 if (data_len > sizeof(c2->error_data.sense_data_buff)) 2173 data_len = 2174 sizeof(c2->error_data.sense_data_buff); 2175 memcpy(cmd->sense_buffer, 2176 c2->error_data.sense_data_buff, data_len); 2177 retry = 1; 2178 break; 2179 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY: 2180 retry = 1; 2181 break; 2182 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON: 2183 retry = 1; 2184 break; 2185 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL: 2186 retry = 1; 2187 break; 2188 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED: 2189 retry = 1; 2190 break; 2191 default: 2192 retry = 1; 2193 break; 2194 } 2195 break; 2196 case IOACCEL2_SERV_RESPONSE_FAILURE: 2197 switch (c2->error_data.status) { 2198 case IOACCEL2_STATUS_SR_IO_ERROR: 2199 case IOACCEL2_STATUS_SR_IO_ABORTED: 2200 case IOACCEL2_STATUS_SR_OVERRUN: 2201 retry = 1; 2202 break; 2203 case IOACCEL2_STATUS_SR_UNDERRUN: 2204 cmd->result = (DID_OK << 16); /* host byte */ 2205 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ 2206 ioaccel2_resid = get_unaligned_le32( 2207 &c2->error_data.resid_cnt[0]); 2208 scsi_set_resid(cmd, ioaccel2_resid); 2209 break; 2210 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE: 2211 case IOACCEL2_STATUS_SR_INVALID_DEVICE: 2212 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED: 2213 /* We will get an event from ctlr to trigger rescan */ 2214 retry = 1; 2215 break; 2216 default: 2217 retry = 1; 2218 } 2219 break; 2220 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE: 2221 break; 2222 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS: 2223 break; 2224 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED: 2225 retry = 1; 2226 break; 2227 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN: 2228 break; 2229 default: 2230 retry = 1; 2231 break; 2232 } 2233 2234 return retry; /* retry on raid path? 
 */
}

static void hpsa_cmd_resolve_events(struct ctlr_info *h,
				    struct CommandList *c)
{
	bool do_wake = false;

	/*
	 * Prevent the following race in the abort handler:
	 *
	 * 1. LLD is requested to abort a SCSI command
	 * 2. The SCSI command completes
	 * 3. The struct CommandList associated with step 2 is made available
	 * 4. New I/O request to LLD to another LUN re-uses struct CommandList
	 * 5. Abort handler follows scsi_cmnd->host_scribble and
	 *    finds struct CommandList and tries to abort it
	 * Now we have aborted the wrong command.
	 *
	 * Reset c->scsi_cmd here so that the abort or reset handler will know
	 * this command has completed. Then, check to see if the handler is
	 * waiting for this command, and, if so, wake it.
	 */
	c->scsi_cmd = SCSI_CMD_IDLE;
	mb();	/* Declare command idle before checking for pending events. */
	if (c->abort_pending) {
		do_wake = true;
		c->abort_pending = false;
	}
	if (c->reset_pending) {
		unsigned long flags;
		struct hpsa_scsi_dev_t *dev;

		/*
		 * There appears to be a reset pending; lock the lock and
		 * reconfirm. If so, then decrement the count of outstanding
		 * commands and wake the reset command if this is the last one.
		 */
		spin_lock_irqsave(&h->lock, flags);
		dev = c->reset_pending;	/* Re-fetch under the lock. */
		if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
			do_wake = true;
		c->reset_pending = NULL;
		spin_unlock_irqrestore(&h->lock, flags);
	}

	if (do_wake)
		wake_up_all(&h->event_sync_wait_queue);
}

static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
				      struct CommandList *c)
{
	hpsa_cmd_resolve_events(h, c);
	cmd_tagged_free(h, c);
}

static void hpsa_cmd_free_and_done(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd)
{
	hpsa_cmd_resolve_and_free(h, c);
	cmd->scsi_done(cmd);
}

static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
{
	INIT_WORK(&c->work, hpsa_command_resubmit_worker);
	queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
}

static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
{
	cmd->result = DID_ABORT << 16;
}

static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
				    struct scsi_cmnd *cmd)
{
	hpsa_set_scsi_cmd_aborted(cmd);
	dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
			 c->Request.CDB, c->err_info->ScsiStatus);
	hpsa_cmd_resolve_and_free(h, c);
}

static void process_ioaccel2_completion(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		struct hpsa_scsi_dev_t *dev)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* check for good status */
	if (likely(c2->error_data.serv_response == 0 &&
			c2->error_data.status == 0))
		return hpsa_cmd_free_and_done(h, c, cmd);

	/*
	 * Any RAID offload error results in retry which will use
	 * the normal I/O path so the controller can handle whatever's
	 * wrong.
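	 * (In particular, IOACCEL2_STATUS_SR_IOACCEL_DISABLED below also
	 * clears offload_enabled, so subsequent I/O to this volume takes
	 * the RAID path until a later rescan re-evaluates offload via
	 * offload_to_be_enabled in adjust_hpsa_scsi_table().)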
 */
	if (is_logical_device(dev) &&
		c2->error_data.serv_response ==
			IOACCEL2_SERV_RESPONSE_FAILURE) {
		if (c2->error_data.status ==
			IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
			dev->offload_enabled = 0;

		return hpsa_retry_cmd(h, c);
	}

	if (handle_ioaccel_mode2_error(h, c, cmd, c2))
		return hpsa_retry_cmd(h, c);

	return hpsa_cmd_free_and_done(h, c, cmd);
}

/* Returns 0 on success, < 0 otherwise. */
static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
					struct CommandList *cp)
{
	u8 tmf_status = cp->err_info->ScsiStatus;

	switch (tmf_status) {
	case CISS_TMF_COMPLETE:
		/*
		 * CISS_TMF_COMPLETE never happens, instead,
		 * ei->CommandStatus == 0 for this case.
		 */
	case CISS_TMF_SUCCESS:
		return 0;
	case CISS_TMF_INVALID_FRAME:
	case CISS_TMF_NOT_SUPPORTED:
	case CISS_TMF_FAILED:
	case CISS_TMF_WRONG_LUN:
	case CISS_TMF_OVERLAPPED_TAG:
		break;
	default:
		dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
				tmf_status);
		break;
	}
	return -tmf_status;
}

static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;
	struct hpsa_scsi_dev_t *dev;
	struct io_accel2_cmd *c2;

	u8 sense_key;
	u8 asc;		/* additional sense code */
	u8 ascq;	/* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = cp->scsi_cmd;
	h = cp->h;
	dev = cmd->device->hostdata;
	c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if ((cp->cmd_type == CMD_SCSI) &&
		(le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
		hpsa_unmap_sg_chain_block(h, cp);

	if ((cp->cmd_type == CMD_IOACCEL2) &&
		(c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
		hpsa_unmap_ioaccel2_sg_chain_block(h, c2);

	cmd->result = (DID_OK << 16);		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */

	if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
		atomic_dec(&cp->phys_disk->ioaccel_cmds_out);

	/*
	 * We check for lockup status here as it may be set for
	 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
	 * fail_all_outstanding_cmds()
	 */
	if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
		/* DID_NO_CONNECT will prevent a retry */
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(h, cp, cmd);
	}

	if (unlikely(hpsa_is_pending_event(cp))) {
		if (cp->reset_pending)
			return hpsa_cmd_resolve_and_free(h, cp);
		if (cp->abort_pending)
			return hpsa_cmd_abort_and_free(h, cp, cmd);
	}

	if (cp->cmd_type == CMD_IOACCEL2)
		return process_ioaccel2_completion(h, cp, cmd, dev);

	scsi_set_resid(cmd, ei->ResidualCnt);
	if (ei->CommandStatus == 0)
		return hpsa_cmd_free_and_done(h, cp, cmd);

	/* For I/O accelerator commands, copy over some fields to the normal
	 * CISS header used below for error handling.
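	 * (SGList/SGTotal, CDBLen, the tag, the LUN address and the CDB
	 * are copied from the io_accel1 command below, so the generic
	 * CMD_TARGET_STATUS handling can report errors the same way it
	 * does for ordinary CISS commands.)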
 */
	if (cp->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
		cp->Header.SGList = scsi_sg_count(cmd);
		cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
		cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
			IOACCEL1_IOFLAGS_CDBLEN_MASK;
		cp->Header.tag = c->tag;
		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);

		/* Any RAID offload error results in retry which will use
		 * the normal I/O path so the controller can handle whatever's
		 * wrong.
		 */
		if (is_logical_device(dev)) {
			if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
				dev->offload_enabled = 0;
			return hpsa_retry_cmd(h, cp);
		}
	}

	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		cmd->result |= ei->ScsiStatus;
		/* copy the sense data */
		if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
			sense_data_size = SCSI_SENSE_BUFFERSIZE;
		else
			sense_data_size = sizeof(ei->SenseInfo);
		if (ei->SenseLen < sense_data_size)
			sense_data_size = ei->SenseLen;
		memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
		if (ei->ScsiStatus)
			decode_sense_data(ei->SenseInfo, sense_data_size,
				&sense_key, &asc, &ascq);
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			if (sense_key == ABORTED_COMMAND) {
				cmd->result |= DID_SOFT_ERROR << 16;
				break;
			}
			break;
		}
		/* Problem was not a check condition
		 * Pass it up to the upper layers...
		 */
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev,
				"cp %p has status 0x%x Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
		} else { /* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev,
				"cp %p SCSI status was 0. Returning no connection.\n",
				cp);

			/* Ordinarily, this case should never happen,
			 * but there is a bug in some released firmware
			 * revisions that allows it to happen if, for
			 * example, a 4100 backplane loses power and
			 * the tape drive is in it. We assume that
			 * it's a fatal error of some kind because we
			 * can't show that it wasn't. We will make it
			 * look like selection timeout since that is
			 * the most common reason for this to occur,
			 * and it's severe enough.
			 */

			cmd->result = DID_NO_CONNECT << 16;
		}
		break;

	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev,
			"CDB %16phN data overrun\n", cp->Request.CDB);
		break;
	case CMD_INVALID: {
		/* print_bytes(cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response). You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target.
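		 * (A debug dump of the rejected command, e.g. via the
		 * commented-out calls above, would be needed to tell the
		 * two cases apart.)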
*/ 2529 cmd->result = DID_NO_CONNECT << 16; 2530 } 2531 break; 2532 case CMD_PROTOCOL_ERR: 2533 cmd->result = DID_ERROR << 16; 2534 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n", 2535 cp->Request.CDB); 2536 break; 2537 case CMD_HARDWARE_ERR: 2538 cmd->result = DID_ERROR << 16; 2539 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n", 2540 cp->Request.CDB); 2541 break; 2542 case CMD_CONNECTION_LOST: 2543 cmd->result = DID_ERROR << 16; 2544 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n", 2545 cp->Request.CDB); 2546 break; 2547 case CMD_ABORTED: 2548 /* Return now to avoid calling scsi_done(). */ 2549 return hpsa_cmd_abort_and_free(h, cp, cmd); 2550 case CMD_ABORT_FAILED: 2551 cmd->result = DID_ERROR << 16; 2552 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n", 2553 cp->Request.CDB); 2554 break; 2555 case CMD_UNSOLICITED_ABORT: 2556 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */ 2557 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n", 2558 cp->Request.CDB); 2559 break; 2560 case CMD_TIMEOUT: 2561 cmd->result = DID_TIME_OUT << 16; 2562 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n", 2563 cp->Request.CDB); 2564 break; 2565 case CMD_UNABORTABLE: 2566 cmd->result = DID_ERROR << 16; 2567 dev_warn(&h->pdev->dev, "Command unabortable\n"); 2568 break; 2569 case CMD_TMF_STATUS: 2570 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */ 2571 cmd->result = DID_ERROR << 16; 2572 break; 2573 case CMD_IOACCEL_DISABLED: 2574 /* This only handles the direct pass-through case since RAID 2575 * offload is handled above. Just attempt a retry. 2576 */ 2577 cmd->result = DID_SOFT_ERROR << 16; 2578 dev_warn(&h->pdev->dev, 2579 "cp %p had HP SSD Smart Path error\n", cp); 2580 break; 2581 default: 2582 cmd->result = DID_ERROR << 16; 2583 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", 2584 cp, ei->CommandStatus); 2585 } 2586 2587 return hpsa_cmd_free_and_done(h, cp, cmd); 2588 } 2589 2590 static void hpsa_pci_unmap(struct pci_dev *pdev, 2591 struct CommandList *c, int sg_used, int data_direction) 2592 { 2593 int i; 2594 2595 for (i = 0; i < sg_used; i++) 2596 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr), 2597 le32_to_cpu(c->SG[i].Len), 2598 data_direction); 2599 } 2600 2601 static int hpsa_map_one(struct pci_dev *pdev, 2602 struct CommandList *cp, 2603 unsigned char *buf, 2604 size_t buflen, 2605 int data_direction) 2606 { 2607 u64 addr64; 2608 2609 if (buflen == 0 || data_direction == PCI_DMA_NONE) { 2610 cp->Header.SGList = 0; 2611 cp->Header.SGTotal = cpu_to_le16(0); 2612 return 0; 2613 } 2614 2615 addr64 = pci_map_single(pdev, buf, buflen, data_direction); 2616 if (dma_mapping_error(&pdev->dev, addr64)) { 2617 /* Prevent subsequent unmap of something never mapped */ 2618 cp->Header.SGList = 0; 2619 cp->Header.SGTotal = cpu_to_le16(0); 2620 return -1; 2621 } 2622 cp->SG[0].Addr = cpu_to_le64(addr64); 2623 cp->SG[0].Len = cpu_to_le32(buflen); 2624 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */ 2625 cp->Header.SGList = 1; /* no. 
SGs contig in this cmd */ 2626 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */ 2627 return 0; 2628 } 2629 2630 #define NO_TIMEOUT ((unsigned long) -1) 2631 #define DEFAULT_TIMEOUT 30000 /* milliseconds */ 2632 static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, 2633 struct CommandList *c, int reply_queue, unsigned long timeout_msecs) 2634 { 2635 DECLARE_COMPLETION_ONSTACK(wait); 2636 2637 c->waiting = &wait; 2638 __enqueue_cmd_and_start_io(h, c, reply_queue); 2639 if (timeout_msecs == NO_TIMEOUT) { 2640 /* TODO: get rid of this no-timeout thing */ 2641 wait_for_completion_io(&wait); 2642 return IO_OK; 2643 } 2644 if (!wait_for_completion_io_timeout(&wait, 2645 msecs_to_jiffies(timeout_msecs))) { 2646 dev_warn(&h->pdev->dev, "Command timed out.\n"); 2647 return -ETIMEDOUT; 2648 } 2649 return IO_OK; 2650 } 2651 2652 static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c, 2653 int reply_queue, unsigned long timeout_msecs) 2654 { 2655 if (unlikely(lockup_detected(h))) { 2656 c->err_info->CommandStatus = CMD_CTLR_LOCKUP; 2657 return IO_OK; 2658 } 2659 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs); 2660 } 2661 2662 static u32 lockup_detected(struct ctlr_info *h) 2663 { 2664 int cpu; 2665 u32 rc, *lockup_detected; 2666 2667 cpu = get_cpu(); 2668 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); 2669 rc = *lockup_detected; 2670 put_cpu(); 2671 return rc; 2672 } 2673 2674 #define MAX_DRIVER_CMD_RETRIES 25 2675 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, 2676 struct CommandList *c, int data_direction, unsigned long timeout_msecs) 2677 { 2678 int backoff_time = 10, retry_count = 0; 2679 int rc; 2680 2681 do { 2682 memset(c->err_info, 0, sizeof(*c->err_info)); 2683 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, 2684 timeout_msecs); 2685 if (rc) 2686 break; 2687 retry_count++; 2688 if (retry_count > 3) { 2689 msleep(backoff_time); 2690 if (backoff_time < 1000) 2691 backoff_time *= 2; 2692 } 2693 } while ((check_for_unit_attention(h, c) || 2694 check_for_busy(h, c)) && 2695 retry_count <= MAX_DRIVER_CMD_RETRIES); 2696 hpsa_pci_unmap(h->pdev, c, 1, data_direction); 2697 if (retry_count > MAX_DRIVER_CMD_RETRIES) 2698 rc = -EIO; 2699 return rc; 2700 } 2701 2702 static void hpsa_print_cmd(struct ctlr_info *h, char *txt, 2703 struct CommandList *c) 2704 { 2705 const u8 *cdb = c->Request.CDB; 2706 const u8 *lun = c->Header.LUN.LunAddrBytes; 2707 2708 dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x" 2709 " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", 2710 txt, lun[0], lun[1], lun[2], lun[3], 2711 lun[4], lun[5], lun[6], lun[7], 2712 cdb[0], cdb[1], cdb[2], cdb[3], 2713 cdb[4], cdb[5], cdb[6], cdb[7], 2714 cdb[8], cdb[9], cdb[10], cdb[11], 2715 cdb[12], cdb[13], cdb[14], cdb[15]); 2716 } 2717 2718 static void hpsa_scsi_interpret_error(struct ctlr_info *h, 2719 struct CommandList *cp) 2720 { 2721 const struct ErrorInfo *ei = cp->err_info; 2722 struct device *d = &cp->h->pdev->dev; 2723 u8 sense_key, asc, ascq; 2724 int sense_len; 2725 2726 switch (ei->CommandStatus) { 2727 case CMD_TARGET_STATUS: 2728 if (ei->SenseLen > sizeof(ei->SenseInfo)) 2729 sense_len = sizeof(ei->SenseInfo); 2730 else 2731 sense_len = ei->SenseLen; 2732 decode_sense_data(ei->SenseInfo, sense_len, 2733 &sense_key, &asc, &ascq); 2734 hpsa_print_cmd(h, "SCSI status", cp); 2735 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) 2736 dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 
0x%02x, ASCQ = 0x%02x\n", 2737 sense_key, asc, ascq); 2738 else 2739 dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus); 2740 if (ei->ScsiStatus == 0) 2741 dev_warn(d, "SCSI status is abnormally zero. " 2742 "(probably indicates selection timeout " 2743 "reported incorrectly due to a known " 2744 "firmware bug, circa July, 2001.)\n"); 2745 break; 2746 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ 2747 break; 2748 case CMD_DATA_OVERRUN: 2749 hpsa_print_cmd(h, "overrun condition", cp); 2750 break; 2751 case CMD_INVALID: { 2752 /* controller unfortunately reports SCSI passthru's 2753 * to non-existent targets as invalid commands. 2754 */ 2755 hpsa_print_cmd(h, "invalid command", cp); 2756 dev_warn(d, "probably means device no longer present\n"); 2757 } 2758 break; 2759 case CMD_PROTOCOL_ERR: 2760 hpsa_print_cmd(h, "protocol error", cp); 2761 break; 2762 case CMD_HARDWARE_ERR: 2763 hpsa_print_cmd(h, "hardware error", cp); 2764 break; 2765 case CMD_CONNECTION_LOST: 2766 hpsa_print_cmd(h, "connection lost", cp); 2767 break; 2768 case CMD_ABORTED: 2769 hpsa_print_cmd(h, "aborted", cp); 2770 break; 2771 case CMD_ABORT_FAILED: 2772 hpsa_print_cmd(h, "abort failed", cp); 2773 break; 2774 case CMD_UNSOLICITED_ABORT: 2775 hpsa_print_cmd(h, "unsolicited abort", cp); 2776 break; 2777 case CMD_TIMEOUT: 2778 hpsa_print_cmd(h, "timed out", cp); 2779 break; 2780 case CMD_UNABORTABLE: 2781 hpsa_print_cmd(h, "unabortable", cp); 2782 break; 2783 case CMD_CTLR_LOCKUP: 2784 hpsa_print_cmd(h, "controller lockup detected", cp); 2785 break; 2786 default: 2787 hpsa_print_cmd(h, "unknown status", cp); 2788 dev_warn(d, "Unknown command status %x\n", 2789 ei->CommandStatus); 2790 } 2791 } 2792 2793 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, 2794 u16 page, unsigned char *buf, 2795 unsigned char bufsize) 2796 { 2797 int rc = IO_OK; 2798 struct CommandList *c; 2799 struct ErrorInfo *ei; 2800 2801 c = cmd_alloc(h); 2802 2803 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, 2804 page, scsi3addr, TYPE_CMD)) { 2805 rc = -1; 2806 goto out; 2807 } 2808 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 2809 PCI_DMA_FROMDEVICE, NO_TIMEOUT); 2810 if (rc) 2811 goto out; 2812 ei = c->err_info; 2813 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 2814 hpsa_scsi_interpret_error(h, c); 2815 rc = -1; 2816 } 2817 out: 2818 cmd_free(h, c); 2819 return rc; 2820 } 2821 2822 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, 2823 u8 reset_type, int reply_queue) 2824 { 2825 int rc = IO_OK; 2826 struct CommandList *c; 2827 struct ErrorInfo *ei; 2828 2829 c = cmd_alloc(h); 2830 2831 2832 /* fill_cmd can't fail here, no data buffer to map. */ 2833 (void) fill_cmd(c, reset_type, h, NULL, 0, 0, 2834 scsi3addr, TYPE_MSG); 2835 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); 2836 if (rc) { 2837 dev_warn(&h->pdev->dev, "Failed to send reset command\n"); 2838 goto out; 2839 } 2840 /* no unmap needed here because no data xfer. 
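	 * (fill_cmd() was given a NULL buffer above, so there is no DMA
	 * mapping for hpsa_pci_unmap() to undo.)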
*/ 2841 2842 ei = c->err_info; 2843 if (ei->CommandStatus != 0) { 2844 hpsa_scsi_interpret_error(h, c); 2845 rc = -1; 2846 } 2847 out: 2848 cmd_free(h, c); 2849 return rc; 2850 } 2851 2852 static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c, 2853 struct hpsa_scsi_dev_t *dev, 2854 unsigned char *scsi3addr) 2855 { 2856 int i; 2857 bool match = false; 2858 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; 2859 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2; 2860 2861 if (hpsa_is_cmd_idle(c)) 2862 return false; 2863 2864 switch (c->cmd_type) { 2865 case CMD_SCSI: 2866 case CMD_IOCTL_PEND: 2867 match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes, 2868 sizeof(c->Header.LUN.LunAddrBytes)); 2869 break; 2870 2871 case CMD_IOACCEL1: 2872 case CMD_IOACCEL2: 2873 if (c->phys_disk == dev) { 2874 /* HBA mode match */ 2875 match = true; 2876 } else { 2877 /* Possible RAID mode -- check each phys dev. */ 2878 /* FIXME: Do we need to take out a lock here? If 2879 * so, we could just call hpsa_get_pdisk_of_ioaccel2() 2880 * instead. */ 2881 for (i = 0; i < dev->nphysical_disks && !match; i++) { 2882 /* FIXME: an alternate test might be 2883 * 2884 * match = dev->phys_disk[i]->ioaccel_handle 2885 * == c2->scsi_nexus; */ 2886 match = dev->phys_disk[i] == c->phys_disk; 2887 } 2888 } 2889 break; 2890 2891 case IOACCEL2_TMF: 2892 for (i = 0; i < dev->nphysical_disks && !match; i++) { 2893 match = dev->phys_disk[i]->ioaccel_handle == 2894 le32_to_cpu(ac->it_nexus); 2895 } 2896 break; 2897 2898 case 0: /* The command is in the middle of being initialized. */ 2899 match = false; 2900 break; 2901 2902 default: 2903 dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n", 2904 c->cmd_type); 2905 BUG(); 2906 } 2907 2908 return match; 2909 } 2910 2911 static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, 2912 unsigned char *scsi3addr, u8 reset_type, int reply_queue) 2913 { 2914 int i; 2915 int rc = 0; 2916 2917 /* We can really only handle one reset at a time */ 2918 if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) { 2919 dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n"); 2920 return -EINTR; 2921 } 2922 2923 BUG_ON(atomic_read(&dev->reset_cmds_out) != 0); 2924 2925 for (i = 0; i < h->nr_cmds; i++) { 2926 struct CommandList *c = h->cmd_pool + i; 2927 int refcount = atomic_inc_return(&c->refcount); 2928 2929 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) { 2930 unsigned long flags; 2931 2932 /* 2933 * Mark the target command as having a reset pending, 2934 * then lock a lock so that the command cannot complete 2935 * while we're considering it. If the command is not 2936 * idle then count it; otherwise revoke the event. 
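			 * (hpsa_cmd_resolve_events() takes h->lock before it
			 * completes a command, so any command counted here is
			 * guaranteed to decrement reset_cmds_out and wake the
			 * event_sync_wait_queue when it finishes.)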
			 */
			c->reset_pending = dev;
			spin_lock_irqsave(&h->lock, flags);	/* Implied MB */
			if (!hpsa_is_cmd_idle(c))
				atomic_inc(&dev->reset_cmds_out);
			else
				c->reset_pending = NULL;
			spin_unlock_irqrestore(&h->lock, flags);
		}

		cmd_free(h, c);
	}

	rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
	if (!rc)
		wait_event(h->event_sync_wait_queue,
			atomic_read(&dev->reset_cmds_out) == 0 ||
			lockup_detected(h));

	if (unlikely(lockup_detected(h))) {
		dev_warn(&h->pdev->dev,
			 "Controller lockup detected during reset wait\n");
		rc = -ENODEV;
	}

	if (unlikely(rc))
		atomic_set(&dev->reset_cmds_out, 0);

	mutex_unlock(&h->reset_mutex);
	return rc;
}

static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
	kfree(buf);
}

#define HPSA_MAP_DEBUG
#ifdef HPSA_MAP_DEBUG
static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
				struct raid_map_data *map_buff)
{
	struct raid_map_disk_data *dd = &map_buff->data[0];
	int map, row, col;
	u16 map_cnt, row_cnt, disks_per_row;

	if (rc != 0)
		return;

	/* Show details only if debugging has been activated. */
	if (h->raid_offload_debug < 2)
		return;

	dev_info(&h->pdev->dev, "structure_size = %u\n",
				le32_to_cpu(map_buff->structure_size));
	dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
			le32_to_cpu(map_buff->volume_blk_size));
	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->volume_blk_cnt));
	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
			map_buff->phys_blk_shift);
	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
			map_buff->parity_rotation_shift);
	dev_info(&h->pdev->dev, "strip_size = %u\n",
			le16_to_cpu(map_buff->strip_size));
	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
			le64_to_cpu(map_buff->disk_starting_blk));
	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->disk_blk_cnt));
	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
			le16_to_cpu(map_buff->data_disks_per_row));
	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
			le16_to_cpu(map_buff->metadata_disks_per_row));
	dev_info(&h->pdev->dev, "row_cnt = %u\n",
			le16_to_cpu(map_buff->row_cnt));
	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
			le16_to_cpu(map_buff->layout_map_count));
	dev_info(&h->pdev->dev, "flags = 0x%x\n",
			le16_to_cpu(map_buff->flags));
	dev_info(&h->pdev->dev, "encryption = %s\n",
			le16_to_cpu(map_buff->flags) &
			RAID_MAP_FLAG_ENCRYPT_ON ?
"ON" : "OFF"); 3033 dev_info(&h->pdev->dev, "dekindex = %u\n", 3034 le16_to_cpu(map_buff->dekindex)); 3035 map_cnt = le16_to_cpu(map_buff->layout_map_count); 3036 for (map = 0; map < map_cnt; map++) { 3037 dev_info(&h->pdev->dev, "Map%u:\n", map); 3038 row_cnt = le16_to_cpu(map_buff->row_cnt); 3039 for (row = 0; row < row_cnt; row++) { 3040 dev_info(&h->pdev->dev, " Row%u:\n", row); 3041 disks_per_row = 3042 le16_to_cpu(map_buff->data_disks_per_row); 3043 for (col = 0; col < disks_per_row; col++, dd++) 3044 dev_info(&h->pdev->dev, 3045 " D%02u: h=0x%04x xor=%u,%u\n", 3046 col, dd->ioaccel_handle, 3047 dd->xor_mult[0], dd->xor_mult[1]); 3048 disks_per_row = 3049 le16_to_cpu(map_buff->metadata_disks_per_row); 3050 for (col = 0; col < disks_per_row; col++, dd++) 3051 dev_info(&h->pdev->dev, 3052 " M%02u: h=0x%04x xor=%u,%u\n", 3053 col, dd->ioaccel_handle, 3054 dd->xor_mult[0], dd->xor_mult[1]); 3055 } 3056 } 3057 } 3058 #else 3059 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h, 3060 __attribute__((unused)) int rc, 3061 __attribute__((unused)) struct raid_map_data *map_buff) 3062 { 3063 } 3064 #endif 3065 3066 static int hpsa_get_raid_map(struct ctlr_info *h, 3067 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) 3068 { 3069 int rc = 0; 3070 struct CommandList *c; 3071 struct ErrorInfo *ei; 3072 3073 c = cmd_alloc(h); 3074 3075 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, 3076 sizeof(this_device->raid_map), 0, 3077 scsi3addr, TYPE_CMD)) { 3078 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n"); 3079 cmd_free(h, c); 3080 return -1; 3081 } 3082 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 3083 PCI_DMA_FROMDEVICE, NO_TIMEOUT); 3084 if (rc) 3085 goto out; 3086 ei = c->err_info; 3087 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 3088 hpsa_scsi_interpret_error(h, c); 3089 rc = -1; 3090 goto out; 3091 } 3092 cmd_free(h, c); 3093 3094 /* @todo in the future, dynamically allocate RAID map memory */ 3095 if (le32_to_cpu(this_device->raid_map.structure_size) > 3096 sizeof(this_device->raid_map)) { 3097 dev_warn(&h->pdev->dev, "RAID map size is too large!\n"); 3098 rc = -1; 3099 } 3100 hpsa_debug_map_buff(h, rc, &this_device->raid_map); 3101 return rc; 3102 out: 3103 cmd_free(h, c); 3104 return rc; 3105 } 3106 3107 static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h, 3108 unsigned char scsi3addr[], u16 bmic_device_index, 3109 struct bmic_sense_subsystem_info *buf, size_t bufsize) 3110 { 3111 int rc = IO_OK; 3112 struct CommandList *c; 3113 struct ErrorInfo *ei; 3114 3115 c = cmd_alloc(h); 3116 3117 rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize, 3118 0, RAID_CTLR_LUNID, TYPE_CMD); 3119 if (rc) 3120 goto out; 3121 3122 c->Request.CDB[2] = bmic_device_index & 0xff; 3123 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; 3124 3125 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 3126 PCI_DMA_FROMDEVICE, NO_TIMEOUT); 3127 if (rc) 3128 goto out; 3129 ei = c->err_info; 3130 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 3131 hpsa_scsi_interpret_error(h, c); 3132 rc = -1; 3133 } 3134 out: 3135 cmd_free(h, c); 3136 return rc; 3137 } 3138 3139 static int hpsa_bmic_id_controller(struct ctlr_info *h, 3140 struct bmic_identify_controller *buf, size_t bufsize) 3141 { 3142 int rc = IO_OK; 3143 struct CommandList *c; 3144 struct ErrorInfo *ei; 3145 3146 c = cmd_alloc(h); 3147 3148 rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize, 3149 0, 
			RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
		PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
	unsigned char scsi3addr[], u16 bmic_device_index,
	struct bmic_identify_physical_device *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	c->Request.CDB[2] = bmic_device_index & 0xff;
	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;

	/*
	 * Capture the return value; it was previously dropped, so a
	 * timed-out command would have been reported as success.
	 */
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
						NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);

	return rc;
}

/*
 * get enclosure information
 * struct ReportExtendedLUNdata *rlep - Used for BMIC drive number
 * struct hpsa_scsi_dev_t *encl_dev - device entry for enclosure
 * Uses id_physical_device to determine the box_index.
 */
static void hpsa_get_enclosure_info(struct ctlr_info *h,
		unsigned char *scsi3addr,
		struct ReportExtendedLUNdata *rlep, int rle_index,
		struct hpsa_scsi_dev_t *encl_dev)
{
	int rc = -1;
	struct CommandList *c = NULL;
	struct ErrorInfo *ei = NULL;
	struct bmic_sense_storage_box_params *bssbp = NULL;
	struct bmic_identify_physical_device *id_phys = NULL;
	struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
	u16 bmic_device_index = 0;

	bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);

	if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
		rc = IO_OK;
		goto out;
	}

	bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
	if (!bssbp)
		goto out;

	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
	if (!id_phys)
		goto out;

	rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
						id_phys, sizeof(*id_phys));
	if (rc) {
		dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n",
			__func__, encl_dev->external, bmic_device_index);
		goto out;
	}

	c = cmd_alloc(h);

	rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
			sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD);

	if (rc)
		goto out;

	if (id_phys->phys_connector[1] == 'E')
		c->Request.CDB[5] = id_phys->box_index;
	else
		c->Request.CDB[5] = 0;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
						NO_TIMEOUT);
	if (rc)
		goto out;

	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		rc = -1;
		goto out;
	}

	encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
	memcpy(&encl_dev->phys_connector[id_phys->active_path_number],
		bssbp->phys_connector, sizeof(bssbp->phys_connector));

	rc = IO_OK;
out:
	kfree(bssbp);
	kfree(id_phys);

	if (c)
		cmd_free(h, c);

	if (rc != IO_OK)
		hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
			"Error, could not get enclosure information\n");
}

static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
						unsigned char *scsi3addr)
{
	struct ReportExtendedLUNdata *physdev;
	u32 nphysicals;
	u64 sa = 0;
	int i;

	physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
	if (!physdev)
		return 0;

	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		kfree(physdev);
		return 0;
	}
	nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;

	for (i = 0; i < nphysicals; i++)
		if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
			sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
			break;
		}

	kfree(physdev);

	return sa;
}

static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
					struct hpsa_scsi_dev_t *dev)
{
	int rc;
	u64 sa = 0;

	if (is_hba_lunid(scsi3addr)) {
		struct bmic_sense_subsystem_info *ssi;

		ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
		if (ssi == NULL) {
			dev_warn(&h->pdev->dev,
				"%s: out of memory\n", __func__);
			return;
		}

		rc = hpsa_bmic_sense_subsystem_information(h,
					scsi3addr, 0, ssi, sizeof(*ssi));
		if (rc == 0) {
			sa = get_unaligned_be64(ssi->primary_world_wide_id);
			h->sas_address = sa;
		}

		kfree(ssi);
	} else
		sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);

	dev->sas_address = sa;
}

/* Determine whether a device supports a given VPD inquiry page */
static int hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page)
{
	int rc;
	int i;
	int pages;
	unsigned char *buf, bufsize;

	buf = kzalloc(256, GFP_KERNEL);
	if (!buf)
		return 0;

	/* Get the size of the page list first */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_unsupported;
	pages = buf[3];
	if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
		bufsize = pages + HPSA_VPD_HEADER_SZ;
	else
		bufsize = 255;

	/* Get the whole VPD page list */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, bufsize);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	for (i = 1; i <= pages; i++)
		if (buf[3 + i] == page)
			goto exit_supported;
exit_unsupported:
	kfree(buf);
	return 0;
exit_supported:
	kfree(buf);
	return 1;
}

static void hpsa_get_ioaccel_status(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc;
	unsigned char *buf;
	u8 ioaccel_status;

	this_device->offload_config = 0;
	this_device->offload_enabled = 0;
	this_device->offload_to_be_enabled = 0;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
		goto out;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
			VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
	if (rc != 0)
		goto out;

#define IOACCEL_STATUS_BYTE 4
#define OFFLOAD_CONFIGURED_BIT 0x01
#define OFFLOAD_ENABLED_BIT 0x02
	ioaccel_status = buf[IOACCEL_STATUS_BYTE];
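	/*
	 * Byte 4 of the ioaccel-status VPD page: bit 0 set means offload
	 * is configured for this volume, bit 1 set means it is currently
	 * enabled, e.g. 0x03 = configured and enabled. (A failed RAID map
	 * fetch below can still veto offload_enabled.)
	 */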
this_device->offload_config = 3410 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); 3411 if (this_device->offload_config) { 3412 this_device->offload_enabled = 3413 !!(ioaccel_status & OFFLOAD_ENABLED_BIT); 3414 if (hpsa_get_raid_map(h, scsi3addr, this_device)) 3415 this_device->offload_enabled = 0; 3416 } 3417 this_device->offload_to_be_enabled = this_device->offload_enabled; 3418 out: 3419 kfree(buf); 3420 return; 3421 } 3422 3423 /* Get the device id from inquiry page 0x83 */ 3424 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr, 3425 unsigned char *device_id, int index, int buflen) 3426 { 3427 int rc; 3428 unsigned char *buf; 3429 3430 if (buflen > 16) 3431 buflen = 16; 3432 buf = kzalloc(64, GFP_KERNEL); 3433 if (!buf) 3434 return -ENOMEM; 3435 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64); 3436 if (rc == 0) 3437 memcpy(device_id, &buf[index], buflen); 3438 3439 kfree(buf); 3440 3441 return rc != 0; 3442 } 3443 3444 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, 3445 void *buf, int bufsize, 3446 int extended_response) 3447 { 3448 int rc = IO_OK; 3449 struct CommandList *c; 3450 unsigned char scsi3addr[8]; 3451 struct ErrorInfo *ei; 3452 3453 c = cmd_alloc(h); 3454 3455 /* address the controller */ 3456 memset(scsi3addr, 0, sizeof(scsi3addr)); 3457 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, 3458 buf, bufsize, 0, scsi3addr, TYPE_CMD)) { 3459 rc = -1; 3460 goto out; 3461 } 3462 if (extended_response) 3463 c->Request.CDB[1] = extended_response; 3464 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 3465 PCI_DMA_FROMDEVICE, NO_TIMEOUT); 3466 if (rc) 3467 goto out; 3468 ei = c->err_info; 3469 if (ei->CommandStatus != 0 && 3470 ei->CommandStatus != CMD_DATA_UNDERRUN) { 3471 hpsa_scsi_interpret_error(h, c); 3472 rc = -1; 3473 } else { 3474 struct ReportLUNdata *rld = buf; 3475 3476 if (rld->extended_response_flag != extended_response) { 3477 dev_err(&h->pdev->dev, 3478 "report luns requested format %u, got %u\n", 3479 extended_response, 3480 rld->extended_response_flag); 3481 rc = -1; 3482 } 3483 } 3484 out: 3485 cmd_free(h, c); 3486 return rc; 3487 } 3488 3489 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, 3490 struct ReportExtendedLUNdata *buf, int bufsize) 3491 { 3492 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, 3493 HPSA_REPORT_PHYS_EXTENDED); 3494 } 3495 3496 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h, 3497 struct ReportLUNdata *buf, int bufsize) 3498 { 3499 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0); 3500 } 3501 3502 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device, 3503 int bus, int target, int lun) 3504 { 3505 device->bus = bus; 3506 device->target = target; 3507 device->lun = lun; 3508 } 3509 3510 /* Use VPD inquiry to get details of volume status */ 3511 static int hpsa_get_volume_status(struct ctlr_info *h, 3512 unsigned char scsi3addr[]) 3513 { 3514 int rc; 3515 int status; 3516 int size; 3517 unsigned char *buf; 3518 3519 buf = kzalloc(64, GFP_KERNEL); 3520 if (!buf) 3521 return HPSA_VPD_LV_STATUS_UNSUPPORTED; 3522 3523 /* Does controller have VPD for logical volume status? 
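	 * (If so, the page is read twice below: a header-sized read to
	 * learn the page length, then the full page; byte 4 of the page
	 * holds the volume status code.)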
 */
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
		goto exit_failed;

	/* Get the size of the VPD return buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	size = buf[3];

	/* Now get the whole VPD buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, size + HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	status = buf[4]; /* status byte */

	kfree(buf);
	return status;
exit_failed:
	kfree(buf);
	return HPSA_VPD_LV_STATUS_UNSUPPORTED;
}

/* Determine offline status of a volume.
 * Return either:
 *  0 (not offline)
 * 0xff (offline for unknown reasons)
 * # (integer code indicating one of several NOT READY states
 *    describing why a volume is to be kept offline)
 */
static int hpsa_volume_offline(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct CommandList *c;
	unsigned char *sense;
	u8 sense_key, asc, ascq;
	int sense_len;
	int rc, ldstat = 0;
	u16 cmd_status;
	u8 scsi_status;
#define ASC_LUN_NOT_READY 0x04
#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02

	c = cmd_alloc(h);

	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	if (rc) {
		cmd_free(h, c);
		return 0;
	}
	sense = c->err_info->SenseInfo;
	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;
	decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
	cmd_status = c->err_info->CommandStatus;
	scsi_status = c->err_info->ScsiStatus;
	cmd_free(h, c);
	/* Is the volume 'not ready'? */
	if (cmd_status != CMD_TARGET_STATUS ||
		scsi_status != SAM_STAT_CHECK_CONDITION ||
		sense_key != NOT_READY ||
		asc != ASC_LUN_NOT_READY) {
		return 0;
	}

	/* Determine the reason for not ready state */
	ldstat = hpsa_get_volume_status(h, scsi3addr);

	/* Keep volume offline in certain cases: */
	switch (ldstat) {
	case HPSA_LV_UNDERGOING_ERASE:
	case HPSA_LV_NOT_AVAILABLE:
	case HPSA_LV_UNDERGOING_RPI:
	case HPSA_LV_PENDING_RPI:
	case HPSA_LV_ENCRYPTED_NO_KEY:
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
	case HPSA_LV_UNDERGOING_ENCRYPTION:
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		return ldstat;
	case HPSA_VPD_LV_STATUS_UNSUPPORTED:
		/* If VPD status page isn't available,
		 * use ASC/ASCQ to determine state
		 */
		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
			return ldstat;
		break;
	default:
		break;
	}
	return 0;
}

/*
 * Find out if a logical device supports aborts by simply trying one.
 * Smart Array may claim not to support aborts on logical drives, but
 * if an MSA2000 is connected, the drives on that will be presented
 * by the Smart Array as logical drives, and aborts may be sent to
 * those devices successfully.
So the simplest way to find out is 3629 * to simply try an abort and see how the device responds. 3630 */ 3631 static int hpsa_device_supports_aborts(struct ctlr_info *h, 3632 unsigned char *scsi3addr) 3633 { 3634 struct CommandList *c; 3635 struct ErrorInfo *ei; 3636 int rc = 0; 3637 3638 u64 tag = (u64) -1; /* bogus tag */ 3639 3640 /* Assume that physical devices support aborts */ 3641 if (!is_logical_dev_addr_mode(scsi3addr)) 3642 return 1; 3643 3644 c = cmd_alloc(h); 3645 3646 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG); 3647 (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); 3648 /* no unmap needed here because no data xfer. */ 3649 ei = c->err_info; 3650 switch (ei->CommandStatus) { 3651 case CMD_INVALID: 3652 rc = 0; 3653 break; 3654 case CMD_UNABORTABLE: 3655 case CMD_ABORT_FAILED: 3656 rc = 1; 3657 break; 3658 case CMD_TMF_STATUS: 3659 rc = hpsa_evaluate_tmf_status(h, c); 3660 break; 3661 default: 3662 rc = 0; 3663 break; 3664 } 3665 cmd_free(h, c); 3666 return rc; 3667 } 3668 3669 static int hpsa_update_device_info(struct ctlr_info *h, 3670 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, 3671 unsigned char *is_OBDR_device) 3672 { 3673 3674 #define OBDR_SIG_OFFSET 43 3675 #define OBDR_TAPE_SIG "$DR-10" 3676 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1) 3677 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN) 3678 3679 unsigned char *inq_buff; 3680 unsigned char *obdr_sig; 3681 int rc = 0; 3682 3683 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); 3684 if (!inq_buff) { 3685 rc = -ENOMEM; 3686 goto bail_out; 3687 } 3688 3689 /* Do an inquiry to the device to see what it is. */ 3690 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, 3691 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { 3692 /* Inquiry failed (msg printed already) */ 3693 dev_err(&h->pdev->dev, 3694 "hpsa_update_device_info: inquiry failed\n"); 3695 rc = -EIO; 3696 goto bail_out; 3697 } 3698 3699 scsi_sanitize_inquiry_string(&inq_buff[8], 8); 3700 scsi_sanitize_inquiry_string(&inq_buff[16], 16); 3701 3702 this_device->devtype = (inq_buff[0] & 0x1f); 3703 memcpy(this_device->scsi3addr, scsi3addr, 8); 3704 memcpy(this_device->vendor, &inq_buff[8], 3705 sizeof(this_device->vendor)); 3706 memcpy(this_device->model, &inq_buff[16], 3707 sizeof(this_device->model)); 3708 memset(this_device->device_id, 0, 3709 sizeof(this_device->device_id)); 3710 hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8, 3711 sizeof(this_device->device_id)); 3712 3713 if ((this_device->devtype == TYPE_DISK || 3714 this_device->devtype == TYPE_ZBC) && 3715 is_logical_dev_addr_mode(scsi3addr)) { 3716 int volume_offline; 3717 3718 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); 3719 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) 3720 hpsa_get_ioaccel_status(h, scsi3addr, this_device); 3721 volume_offline = hpsa_volume_offline(h, scsi3addr); 3722 if (volume_offline < 0 || volume_offline > 0xff) 3723 volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED; 3724 this_device->volume_offline = volume_offline & 0xff; 3725 } else { 3726 this_device->raid_level = RAID_UNKNOWN; 3727 this_device->offload_config = 0; 3728 this_device->offload_enabled = 0; 3729 this_device->offload_to_be_enabled = 0; 3730 this_device->hba_ioaccel_enabled = 0; 3731 this_device->volume_offline = 0; 3732 this_device->queue_depth = h->nr_cmds; 3733 } 3734 3735 if (is_OBDR_device) { 3736 /* See if this is a One-Button-Disaster-Recovery device 3737 * by looking for "$DR-10" at offset 43 in inquiry 
data. 3738 */ 3739 obdr_sig = &inq_buff[OBDR_SIG_OFFSET]; 3740 *is_OBDR_device = (this_device->devtype == TYPE_ROM && 3741 strncmp(obdr_sig, OBDR_TAPE_SIG, 3742 OBDR_SIG_LEN) == 0); 3743 } 3744 kfree(inq_buff); 3745 return 0; 3746 3747 bail_out: 3748 kfree(inq_buff); 3749 return rc; 3750 } 3751 3752 static void hpsa_update_device_supports_aborts(struct ctlr_info *h, 3753 struct hpsa_scsi_dev_t *dev, u8 *scsi3addr) 3754 { 3755 unsigned long flags; 3756 int rc, entry; 3757 /* 3758 * See if this device supports aborts. If we already know 3759 * the device, we already know if it supports aborts, otherwise 3760 * we have to find out if it supports aborts by trying one. 3761 */ 3762 spin_lock_irqsave(&h->devlock, flags); 3763 rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry); 3764 if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) && 3765 entry >= 0 && entry < h->ndevices) { 3766 dev->supports_aborts = h->dev[entry]->supports_aborts; 3767 spin_unlock_irqrestore(&h->devlock, flags); 3768 } else { 3769 spin_unlock_irqrestore(&h->devlock, flags); 3770 dev->supports_aborts = 3771 hpsa_device_supports_aborts(h, scsi3addr); 3772 if (dev->supports_aborts < 0) 3773 dev->supports_aborts = 0; 3774 } 3775 } 3776 3777 /* 3778 * Helper function to assign bus, target, lun mapping of devices. 3779 * Logical drive target and lun are assigned at this time, but 3780 * physical device lun and target assignment are deferred (assigned 3781 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.) 3782 */ 3783 static void figure_bus_target_lun(struct ctlr_info *h, 3784 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device) 3785 { 3786 u32 lunid = get_unaligned_le32(lunaddrbytes); 3787 3788 if (!is_logical_dev_addr_mode(lunaddrbytes)) { 3789 /* physical device, target and lun filled in later */ 3790 if (is_hba_lunid(lunaddrbytes)) 3791 hpsa_set_bus_target_lun(device, 3792 HPSA_HBA_BUS, 0, lunid & 0x3fff); 3793 else 3794 /* defer target, lun assignment for physical devices */ 3795 hpsa_set_bus_target_lun(device, 3796 HPSA_PHYSICAL_DEVICE_BUS, -1, -1); 3797 return; 3798 } 3799 /* It's a logical device */ 3800 if (device->external) { 3801 hpsa_set_bus_target_lun(device, 3802 HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff, 3803 lunid & 0x00ff); 3804 return; 3805 } 3806 hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS, 3807 0, lunid & 0x3fff); 3808 } 3809 3810 3811 /* 3812 * Get address of physical disk used for an ioaccel2 mode command: 3813 * 1. Extract ioaccel2 handle from the command. 3814 * 2. Find a matching ioaccel2 handle from list of physical disks. 3815 * 3. Return: 3816 * 1 and set scsi3addr to address of matching physical 3817 * 0 if no matching physical disk was found. 
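 *
 * (Illustrative walk-through, handle value assumed: the ioaccel2
 * command records the drive it was issued to as a scsi_nexus
 * handle, so the lookup is essentially
 *
 *	handle = le32_to_cpu(c2->scsi_nexus);	// e.g. 0x00c0ffee
 *	// scan h->dev[0..ndevices) for ioaccel_handle == handle
 *
 * performed under h->devlock so the table can't change mid-scan.)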
3818 */ 3819 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, 3820 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr) 3821 { 3822 struct io_accel2_cmd *c2 = 3823 &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex]; 3824 unsigned long flags; 3825 int i; 3826 3827 spin_lock_irqsave(&h->devlock, flags); 3828 for (i = 0; i < h->ndevices; i++) 3829 if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) { 3830 memcpy(scsi3addr, h->dev[i]->scsi3addr, 3831 sizeof(h->dev[i]->scsi3addr)); 3832 spin_unlock_irqrestore(&h->devlock, flags); 3833 return 1; 3834 } 3835 spin_unlock_irqrestore(&h->devlock, flags); 3836 return 0; 3837 } 3838 3839 static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position, 3840 int i, int nphysicals, int nlocal_logicals) 3841 { 3842 /* In report logicals, local logicals are listed first, 3843 * then any externals. 3844 */ 3845 int logicals_start = nphysicals + (raid_ctlr_position == 0); 3846 3847 if (i == raid_ctlr_position) 3848 return 0; 3849 3850 if (i < logicals_start) 3851 return 0; 3852 3853 /* i is in logicals range, but still within local logicals */ 3854 if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals) 3855 return 0; 3856 3857 return 1; /* it's an external lun */ 3858 } 3859 3860 /* 3861 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev, 3862 * logdev. The number of luns in physdev and logdev are returned in 3863 * *nphysicals and *nlogicals, respectively. 3864 * Returns 0 on success, -1 otherwise. 3865 */ 3866 static int hpsa_gather_lun_info(struct ctlr_info *h, 3867 struct ReportExtendedLUNdata *physdev, u32 *nphysicals, 3868 struct ReportLUNdata *logdev, u32 *nlogicals) 3869 { 3870 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) { 3871 dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); 3872 return -1; 3873 } 3874 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24; 3875 if (*nphysicals > HPSA_MAX_PHYS_LUN) { 3876 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n", 3877 HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN); 3878 *nphysicals = HPSA_MAX_PHYS_LUN; 3879 } 3880 if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) { 3881 dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); 3882 return -1; 3883 } 3884 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8; 3885 /* Reject Logicals in excess of our max capability. */ 3886 if (*nlogicals > HPSA_MAX_LUN) { 3887 dev_warn(&h->pdev->dev, 3888 "maximum logical LUNs (%d) exceeded. " 3889 "%d LUNs ignored.\n", HPSA_MAX_LUN, 3890 *nlogicals - HPSA_MAX_LUN); 3891 *nlogicals = HPSA_MAX_LUN; 3892 } 3893 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) { 3894 dev_warn(&h->pdev->dev, 3895 "maximum logical + physical LUNs (%d) exceeded. 
" 3896 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, 3897 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN); 3898 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals; 3899 } 3900 return 0; 3901 } 3902 3903 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, 3904 int i, int nphysicals, int nlogicals, 3905 struct ReportExtendedLUNdata *physdev_list, 3906 struct ReportLUNdata *logdev_list) 3907 { 3908 /* Helper function, figure out where the LUN ID info is coming from 3909 * given index i, lists of physical and logical devices, where in 3910 * the list the raid controller is supposed to appear (first or last) 3911 */ 3912 3913 int logicals_start = nphysicals + (raid_ctlr_position == 0); 3914 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0); 3915 3916 if (i == raid_ctlr_position) 3917 return RAID_CTLR_LUNID; 3918 3919 if (i < logicals_start) 3920 return &physdev_list->LUN[i - 3921 (raid_ctlr_position == 0)].lunid[0]; 3922 3923 if (i < last_device) 3924 return &logdev_list->LUN[i - nphysicals - 3925 (raid_ctlr_position == 0)][0]; 3926 BUG(); 3927 return NULL; 3928 } 3929 3930 /* get physical drive ioaccel handle and queue depth */ 3931 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h, 3932 struct hpsa_scsi_dev_t *dev, 3933 struct ReportExtendedLUNdata *rlep, int rle_index, 3934 struct bmic_identify_physical_device *id_phys) 3935 { 3936 int rc; 3937 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index]; 3938 3939 dev->ioaccel_handle = rle->ioaccel_handle; 3940 if ((rle->device_flags & 0x08) && dev->ioaccel_handle) 3941 dev->hba_ioaccel_enabled = 1; 3942 memset(id_phys, 0, sizeof(*id_phys)); 3943 rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0], 3944 GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys, 3945 sizeof(*id_phys)); 3946 if (!rc) 3947 /* Reserve space for FW operations */ 3948 #define DRIVE_CMDS_RESERVED_FOR_FW 2 3949 #define DRIVE_QUEUE_DEPTH 7 3950 dev->queue_depth = 3951 le16_to_cpu(id_phys->current_queue_depth_limit) - 3952 DRIVE_CMDS_RESERVED_FOR_FW; 3953 else 3954 dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */ 3955 } 3956 3957 static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device, 3958 struct ReportExtendedLUNdata *rlep, int rle_index, 3959 struct bmic_identify_physical_device *id_phys) 3960 { 3961 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index]; 3962 3963 if ((rle->device_flags & 0x08) && this_device->ioaccel_handle) 3964 this_device->hba_ioaccel_enabled = 1; 3965 3966 memcpy(&this_device->active_path_index, 3967 &id_phys->active_path_number, 3968 sizeof(this_device->active_path_index)); 3969 memcpy(&this_device->path_map, 3970 &id_phys->redundant_path_present_map, 3971 sizeof(this_device->path_map)); 3972 memcpy(&this_device->box, 3973 &id_phys->alternate_paths_phys_box_on_port, 3974 sizeof(this_device->box)); 3975 memcpy(&this_device->phys_connector, 3976 &id_phys->alternate_paths_phys_connector, 3977 sizeof(this_device->phys_connector)); 3978 memcpy(&this_device->bay, 3979 &id_phys->phys_bay_in_box, 3980 sizeof(this_device->bay)); 3981 } 3982 3983 /* get number of local logical disks. 
*/ 3984 static int hpsa_set_local_logical_count(struct ctlr_info *h, 3985 struct bmic_identify_controller *id_ctlr, 3986 u32 *nlocals) 3987 { 3988 int rc; 3989 3990 if (!id_ctlr) { 3991 dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n", 3992 __func__); 3993 return -ENOMEM; 3994 } 3995 memset(id_ctlr, 0, sizeof(*id_ctlr)); 3996 rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr)); 3997 if (!rc) 3998 if (id_ctlr->configured_logical_drive_count < 256) 3999 *nlocals = id_ctlr->configured_logical_drive_count; 4000 else 4001 *nlocals = le16_to_cpu( 4002 id_ctlr->extended_logical_unit_count); 4003 else 4004 *nlocals = -1; 4005 return rc; 4006 } 4007 4008 4009 static void hpsa_update_scsi_devices(struct ctlr_info *h) 4010 { 4011 /* the idea here is we could get notified 4012 * that some devices have changed, so we do a report 4013 * physical luns and report logical luns cmd, and adjust 4014 * our list of devices accordingly. 4015 * 4016 * The scsi3addr's of devices won't change so long as the 4017 * adapter is not reset. That means we can rescan and 4018 * tell which devices we already know about, vs. new 4019 * devices, vs. disappearing devices. 4020 */ 4021 struct ReportExtendedLUNdata *physdev_list = NULL; 4022 struct ReportLUNdata *logdev_list = NULL; 4023 struct bmic_identify_physical_device *id_phys = NULL; 4024 struct bmic_identify_controller *id_ctlr = NULL; 4025 u32 nphysicals = 0; 4026 u32 nlogicals = 0; 4027 u32 nlocal_logicals = 0; 4028 u32 ndev_allocated = 0; 4029 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; 4030 int ncurrent = 0; 4031 int i, n_ext_target_devs, ndevs_to_allocate; 4032 int raid_ctlr_position; 4033 bool physical_device; 4034 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS); 4035 4036 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); 4037 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL); 4038 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL); 4039 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); 4040 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL); 4041 id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL); 4042 4043 if (!currentsd || !physdev_list || !logdev_list || 4044 !tmpdevice || !id_phys || !id_ctlr) { 4045 dev_err(&h->pdev->dev, "out of memory\n"); 4046 goto out; 4047 } 4048 memset(lunzerobits, 0, sizeof(lunzerobits)); 4049 4050 h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */ 4051 4052 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals, 4053 logdev_list, &nlogicals)) { 4054 h->drv_req_rescan = 1; 4055 goto out; 4056 } 4057 4058 /* Set number of local logicals (non PTRAID) */ 4059 if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) { 4060 dev_warn(&h->pdev->dev, 4061 "%s: Can't determine number of local logical devices.\n", 4062 __func__); 4063 } 4064 4065 /* We might see up to the maximum number of logical and physical disks 4066 * plus external target devices, and a device for the local RAID 4067 * controller. 4068 */ 4069 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1; 4070 4071 /* Allocate the per device structures */ 4072 for (i = 0; i < ndevs_to_allocate; i++) { 4073 if (i >= HPSA_MAX_DEVICES) { 4074 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded." 
4075 " %d devices ignored.\n", HPSA_MAX_DEVICES, 4076 ndevs_to_allocate - HPSA_MAX_DEVICES); 4077 break; 4078 } 4079 4080 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL); 4081 if (!currentsd[i]) { 4082 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n", 4083 __FILE__, __LINE__); 4084 h->drv_req_rescan = 1; 4085 goto out; 4086 } 4087 ndev_allocated++; 4088 } 4089 4090 if (is_scsi_rev_5(h)) 4091 raid_ctlr_position = 0; 4092 else 4093 raid_ctlr_position = nphysicals + nlogicals; 4094 4095 /* adjust our table of devices */ 4096 n_ext_target_devs = 0; 4097 for (i = 0; i < nphysicals + nlogicals + 1; i++) { 4098 u8 *lunaddrbytes, is_OBDR = 0; 4099 int rc = 0; 4100 int phys_dev_index = i - (raid_ctlr_position == 0); 4101 4102 physical_device = i < nphysicals + (raid_ctlr_position == 0); 4103 4104 /* Figure out where the LUN ID info is coming from */ 4105 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, 4106 i, nphysicals, nlogicals, physdev_list, logdev_list); 4107 4108 /* skip masked non-disk devices */ 4109 if (MASKED_DEVICE(lunaddrbytes) && physical_device && 4110 (physdev_list->LUN[phys_dev_index].device_type != 0x06) && 4111 (physdev_list->LUN[phys_dev_index].device_flags & 0x01)) 4112 continue; 4113 4114 /* Get device type, vendor, model, device id */ 4115 rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice, 4116 &is_OBDR); 4117 if (rc == -ENOMEM) { 4118 dev_warn(&h->pdev->dev, 4119 "Out of memory, rescan deferred.\n"); 4120 h->drv_req_rescan = 1; 4121 goto out; 4122 } 4123 if (rc) { 4124 dev_warn(&h->pdev->dev, 4125 "Inquiry failed, skipping device.\n"); 4126 continue; 4127 } 4128 4129 /* Determine if this is a lun from an external target array */ 4130 tmpdevice->external = 4131 figure_external_status(h, raid_ctlr_position, i, 4132 nphysicals, nlocal_logicals); 4133 4134 figure_bus_target_lun(h, lunaddrbytes, tmpdevice); 4135 hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes); 4136 this_device = currentsd[ncurrent]; 4137 4138 /* Turn on discovery_polling if there are ext target devices. 4139 * Event-based change notification is unreliable for those. 4140 */ 4141 if (!h->discovery_polling) { 4142 if (tmpdevice->external) { 4143 h->discovery_polling = 1; 4144 dev_info(&h->pdev->dev, 4145 "External target, activate discovery polling.\n"); 4146 } 4147 } 4148 4149 4150 *this_device = *tmpdevice; 4151 this_device->physical_device = physical_device; 4152 4153 /* 4154 * Expose all devices except for physical devices that 4155 * are masked. 4156 */ 4157 if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device) 4158 this_device->expose_device = 0; 4159 else 4160 this_device->expose_device = 1; 4161 4162 4163 /* 4164 * Get the SAS address for physical devices that are exposed. 4165 */ 4166 if (this_device->physical_device && this_device->expose_device) 4167 hpsa_get_sas_address(h, lunaddrbytes, this_device); 4168 4169 switch (this_device->devtype) { 4170 case TYPE_ROM: 4171 /* We don't *really* support actual CD-ROM devices, 4172 * just "One Button Disaster Recovery" tape drive 4173 * which temporarily pretends to be a CD-ROM drive. 4174 * So we check that the device is really an OBDR tape 4175 * device by checking for "$DR-10" in bytes 43-48 of 4176 * the inquiry data. 4177 */ 4178 if (is_OBDR) 4179 ncurrent++; 4180 break; 4181 case TYPE_DISK: 4182 case TYPE_ZBC: 4183 if (this_device->physical_device) { 4184 /* The disk is in HBA mode. */ 4185 /* Never use RAID mapper in HBA mode. 
*/ 4186 this_device->offload_enabled = 0; 4187 hpsa_get_ioaccel_drive_info(h, this_device, 4188 physdev_list, phys_dev_index, id_phys); 4189 hpsa_get_path_info(this_device, 4190 physdev_list, phys_dev_index, id_phys); 4191 } 4192 ncurrent++; 4193 break; 4194 case TYPE_TAPE: 4195 case TYPE_MEDIUM_CHANGER: 4196 ncurrent++; 4197 break; 4198 case TYPE_ENCLOSURE: 4199 if (!this_device->external) 4200 hpsa_get_enclosure_info(h, lunaddrbytes, 4201 physdev_list, phys_dev_index, 4202 this_device); 4203 ncurrent++; 4204 break; 4205 case TYPE_RAID: 4206 /* Only present the Smartarray HBA as a RAID controller. 4207 * If it's a RAID controller other than the HBA itself 4208 * (an external RAID controller, MSA500 or similar) 4209 * don't present it. 4210 */ 4211 if (!is_hba_lunid(lunaddrbytes)) 4212 break; 4213 ncurrent++; 4214 break; 4215 default: 4216 break; 4217 } 4218 if (ncurrent >= HPSA_MAX_DEVICES) 4219 break; 4220 } 4221 4222 if (h->sas_host == NULL) { 4223 int rc = 0; 4224 4225 rc = hpsa_add_sas_host(h); 4226 if (rc) { 4227 dev_warn(&h->pdev->dev, 4228 "Could not add sas host %d\n", rc); 4229 goto out; 4230 } 4231 } 4232 4233 adjust_hpsa_scsi_table(h, currentsd, ncurrent); 4234 out: 4235 kfree(tmpdevice); 4236 for (i = 0; i < ndev_allocated; i++) 4237 kfree(currentsd[i]); 4238 kfree(currentsd); 4239 kfree(physdev_list); 4240 kfree(logdev_list); 4241 kfree(id_ctlr); 4242 kfree(id_phys); 4243 } 4244 4245 static void hpsa_set_sg_descriptor(struct SGDescriptor *desc, 4246 struct scatterlist *sg) 4247 { 4248 u64 addr64 = (u64) sg_dma_address(sg); 4249 unsigned int len = sg_dma_len(sg); 4250 4251 desc->Addr = cpu_to_le64(addr64); 4252 desc->Len = cpu_to_le32(len); 4253 desc->Ext = 0; 4254 } 4255 4256 /* 4257 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci 4258 * dma mapping and fills in the scatter gather entries of the 4259 * hpsa command, cp. 4260 */ 4261 static int hpsa_scatter_gather(struct ctlr_info *h, 4262 struct CommandList *cp, 4263 struct scsi_cmnd *cmd) 4264 { 4265 struct scatterlist *sg; 4266 int use_sg, i, sg_limit, chained, last_sg; 4267 struct SGDescriptor *curr_sg; 4268 4269 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); 4270 4271 use_sg = scsi_dma_map(cmd); 4272 if (use_sg < 0) 4273 return use_sg; 4274 4275 if (!use_sg) 4276 goto sglist_finished; 4277 4278 /* 4279 * If the number of entries is greater than the max for a single list, 4280 * then we have a chained list; we will set up all but one entry in the 4281 * first list (the last entry is saved for link information); 4282 * otherwise, we don't have a chained list and we'll set up at each of 4283 * the entries in the one list. 4284 */ 4285 curr_sg = cp->SG; 4286 chained = use_sg > h->max_cmd_sg_entries; 4287 sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg; 4288 last_sg = scsi_sg_count(cmd) - 1; 4289 scsi_for_each_sg(cmd, sg, sg_limit, i) { 4290 hpsa_set_sg_descriptor(curr_sg, sg); 4291 curr_sg++; 4292 } 4293 4294 if (chained) { 4295 /* 4296 * Continue with the chained list. Set curr_sg to the chained 4297 * list. Modify the limit to the total count less the entries 4298 * we've already set up. Resume the scan at the list entry 4299 * where the previous loop left off. 4300 */ 4301 curr_sg = h->cmd_sg_list[cp->cmdindex]; 4302 sg_limit = use_sg - sg_limit; 4303 for_each_sg(sg, sg, sg_limit, i) { 4304 hpsa_set_sg_descriptor(curr_sg, sg); 4305 curr_sg++; 4306 } 4307 } 4308 4309 /* Back the pointer up to the last entry and mark it as "last". 
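 * (Worked example, sizes assumed: with max_cmd_sg_entries == 32
 * and use_sg == 40, the first loop filled 31 in-command
 * descriptors, the 32nd slot became the chain link, and the
 * remaining 9 descriptors landed in the chain block; the header
 * below then reports SGList = 32 and SGTotal = use_sg + 1 = 41,
 * the "+ 1" accounting for the chain descriptor itself.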
*/ 4310 (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST); 4311 4312 if (use_sg + chained > h->maxSG) 4313 h->maxSG = use_sg + chained; 4314 4315 if (chained) { 4316 cp->Header.SGList = h->max_cmd_sg_entries; 4317 cp->Header.SGTotal = cpu_to_le16(use_sg + 1); 4318 if (hpsa_map_sg_chain_block(h, cp)) { 4319 scsi_dma_unmap(cmd); 4320 return -1; 4321 } 4322 return 0; 4323 } 4324 4325 sglist_finished: 4326 4327 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */ 4328 cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */ 4329 return 0; 4330 } 4331 4332 #define IO_ACCEL_INELIGIBLE (1) 4333 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len) 4334 { 4335 int is_write = 0; 4336 u32 block; 4337 u32 block_cnt; 4338 4339 /* Perform some CDB fixups if needed using 10 byte reads/writes only */ 4340 switch (cdb[0]) { 4341 case WRITE_6: 4342 case WRITE_12: 4343 is_write = 1; 4344 case READ_6: 4345 case READ_12: 4346 if (*cdb_len == 6) { 4347 block = get_unaligned_be16(&cdb[2]); 4348 block_cnt = cdb[4]; 4349 if (block_cnt == 0) 4350 block_cnt = 256; 4351 } else { 4352 BUG_ON(*cdb_len != 12); 4353 block = get_unaligned_be32(&cdb[2]); 4354 block_cnt = get_unaligned_be32(&cdb[6]); 4355 } 4356 if (block_cnt > 0xffff) 4357 return IO_ACCEL_INELIGIBLE; 4358 4359 cdb[0] = is_write ? WRITE_10 : READ_10; 4360 cdb[1] = 0; 4361 cdb[2] = (u8) (block >> 24); 4362 cdb[3] = (u8) (block >> 16); 4363 cdb[4] = (u8) (block >> 8); 4364 cdb[5] = (u8) (block); 4365 cdb[6] = 0; 4366 cdb[7] = (u8) (block_cnt >> 8); 4367 cdb[8] = (u8) (block_cnt); 4368 cdb[9] = 0; 4369 *cdb_len = 10; 4370 break; 4371 } 4372 return 0; 4373 } 4374 4375 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, 4376 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 4377 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) 4378 { 4379 struct scsi_cmnd *cmd = c->scsi_cmd; 4380 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; 4381 unsigned int len; 4382 unsigned int total_len = 0; 4383 struct scatterlist *sg; 4384 u64 addr64; 4385 int use_sg, i; 4386 struct SGDescriptor *curr_sg; 4387 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE; 4388 4389 /* TODO: implement chaining support */ 4390 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) { 4391 atomic_dec(&phys_disk->ioaccel_cmds_out); 4392 return IO_ACCEL_INELIGIBLE; 4393 } 4394 4395 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX); 4396 4397 if (fixup_ioaccel_cdb(cdb, &cdb_len)) { 4398 atomic_dec(&phys_disk->ioaccel_cmds_out); 4399 return IO_ACCEL_INELIGIBLE; 4400 } 4401 4402 c->cmd_type = CMD_IOACCEL1; 4403 4404 /* Adjust the DMA address to point to the accelerated command buffer */ 4405 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle + 4406 (c->cmdindex * sizeof(*cp)); 4407 BUG_ON(c->busaddr & 0x0000007F); 4408 4409 use_sg = scsi_dma_map(cmd); 4410 if (use_sg < 0) { 4411 atomic_dec(&phys_disk->ioaccel_cmds_out); 4412 return use_sg; 4413 } 4414 4415 if (use_sg) { 4416 curr_sg = cp->SG; 4417 scsi_for_each_sg(cmd, sg, use_sg, i) { 4418 addr64 = (u64) sg_dma_address(sg); 4419 len = sg_dma_len(sg); 4420 total_len += len; 4421 curr_sg->Addr = cpu_to_le64(addr64); 4422 curr_sg->Len = cpu_to_le32(len); 4423 curr_sg->Ext = cpu_to_le32(0); 4424 curr_sg++; 4425 } 4426 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST); 4427 4428 switch (cmd->sc_data_direction) { 4429 case DMA_TO_DEVICE: 4430 control |= IOACCEL1_CONTROL_DATA_OUT; 4431 break; 4432 case DMA_FROM_DEVICE: 4433 control |= IOACCEL1_CONTROL_DATA_IN; 4434 break; 4435 case DMA_NONE: 4436 control |= 
IOACCEL1_CONTROL_NODATAXFER; 4437 break; 4438 default: 4439 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 4440 cmd->sc_data_direction); 4441 BUG(); 4442 break; 4443 } 4444 } else { 4445 control |= IOACCEL1_CONTROL_NODATAXFER; 4446 } 4447 4448 c->Header.SGList = use_sg; 4449 /* Fill out the command structure to submit */ 4450 cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF); 4451 cp->transfer_len = cpu_to_le32(total_len); 4452 cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ | 4453 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK)); 4454 cp->control = cpu_to_le32(control); 4455 memcpy(cp->CDB, cdb, cdb_len); 4456 memcpy(cp->CISS_LUN, scsi3addr, 8); 4457 /* Tag was already set at init time. */ 4458 enqueue_cmd_and_start_io(h, c); 4459 return 0; 4460 } 4461 4462 /* 4463 * Queue a command directly to a device behind the controller using the 4464 * I/O accelerator path. 4465 */ 4466 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h, 4467 struct CommandList *c) 4468 { 4469 struct scsi_cmnd *cmd = c->scsi_cmd; 4470 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 4471 4472 c->phys_disk = dev; 4473 4474 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, 4475 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev); 4476 } 4477 4478 /* 4479 * Set encryption parameters for the ioaccel2 request 4480 */ 4481 static void set_encrypt_ioaccel2(struct ctlr_info *h, 4482 struct CommandList *c, struct io_accel2_cmd *cp) 4483 { 4484 struct scsi_cmnd *cmd = c->scsi_cmd; 4485 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 4486 struct raid_map_data *map = &dev->raid_map; 4487 u64 first_block; 4488 4489 /* Are we doing encryption on this device */ 4490 if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON)) 4491 return; 4492 /* Set the data encryption key index. */ 4493 cp->dekindex = map->dekindex; 4494 4495 /* Set the encryption enable flag, encoded into direction field. */ 4496 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK; 4497 4498 /* Set encryption tweak values based on logical block address 4499 * If block size is 512, tweak value is LBA. 4500 * For other block sizes, tweak is (LBA * block size)/ 512) 4501 */ 4502 switch (cmd->cmnd[0]) { 4503 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */ 4504 case WRITE_6: 4505 case READ_6: 4506 first_block = get_unaligned_be16(&cmd->cmnd[2]); 4507 break; 4508 case WRITE_10: 4509 case READ_10: 4510 /* Required? 
12-byte cdbs eliminated by fixup_ioaccel_cdb */ 4511 case WRITE_12: 4512 case READ_12: 4513 first_block = get_unaligned_be32(&cmd->cmnd[2]); 4514 break; 4515 case WRITE_16: 4516 case READ_16: 4517 first_block = get_unaligned_be64(&cmd->cmnd[2]); 4518 break; 4519 default: 4520 dev_err(&h->pdev->dev, 4521 "ERROR: %s: size (0x%x) not supported for encryption\n", 4522 __func__, cmd->cmnd[0]); 4523 BUG(); 4524 break; 4525 } 4526 4527 if (le32_to_cpu(map->volume_blk_size) != 512) 4528 first_block = first_block * 4529 le32_to_cpu(map->volume_blk_size)/512; 4530 4531 cp->tweak_lower = cpu_to_le32(first_block); 4532 cp->tweak_upper = cpu_to_le32(first_block >> 32); 4533 } 4534 4535 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, 4536 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 4537 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) 4538 { 4539 struct scsi_cmnd *cmd = c->scsi_cmd; 4540 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; 4541 struct ioaccel2_sg_element *curr_sg; 4542 int use_sg, i; 4543 struct scatterlist *sg; 4544 u64 addr64; 4545 u32 len; 4546 u32 total_len = 0; 4547 4548 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); 4549 4550 if (fixup_ioaccel_cdb(cdb, &cdb_len)) { 4551 atomic_dec(&phys_disk->ioaccel_cmds_out); 4552 return IO_ACCEL_INELIGIBLE; 4553 } 4554 4555 c->cmd_type = CMD_IOACCEL2; 4556 /* Adjust the DMA address to point to the accelerated command buffer */ 4557 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + 4558 (c->cmdindex * sizeof(*cp)); 4559 BUG_ON(c->busaddr & 0x0000007F); 4560 4561 memset(cp, 0, sizeof(*cp)); 4562 cp->IU_type = IOACCEL2_IU_TYPE; 4563 4564 use_sg = scsi_dma_map(cmd); 4565 if (use_sg < 0) { 4566 atomic_dec(&phys_disk->ioaccel_cmds_out); 4567 return use_sg; 4568 } 4569 4570 if (use_sg) { 4571 curr_sg = cp->sg; 4572 if (use_sg > h->ioaccel_maxsg) { 4573 addr64 = le64_to_cpu( 4574 h->ioaccel2_cmd_sg_list[c->cmdindex]->address); 4575 curr_sg->address = cpu_to_le64(addr64); 4576 curr_sg->length = 0; 4577 curr_sg->reserved[0] = 0; 4578 curr_sg->reserved[1] = 0; 4579 curr_sg->reserved[2] = 0; 4580 curr_sg->chain_indicator = 0x80; 4581 4582 curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex]; 4583 } 4584 scsi_for_each_sg(cmd, sg, use_sg, i) { 4585 addr64 = (u64) sg_dma_address(sg); 4586 len = sg_dma_len(sg); 4587 total_len += len; 4588 curr_sg->address = cpu_to_le64(addr64); 4589 curr_sg->length = cpu_to_le32(len); 4590 curr_sg->reserved[0] = 0; 4591 curr_sg->reserved[1] = 0; 4592 curr_sg->reserved[2] = 0; 4593 curr_sg->chain_indicator = 0; 4594 curr_sg++; 4595 } 4596 4597 switch (cmd->sc_data_direction) { 4598 case DMA_TO_DEVICE: 4599 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 4600 cp->direction |= IOACCEL2_DIR_DATA_OUT; 4601 break; 4602 case DMA_FROM_DEVICE: 4603 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 4604 cp->direction |= IOACCEL2_DIR_DATA_IN; 4605 break; 4606 case DMA_NONE: 4607 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 4608 cp->direction |= IOACCEL2_DIR_NO_DATA; 4609 break; 4610 default: 4611 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 4612 cmd->sc_data_direction); 4613 BUG(); 4614 break; 4615 } 4616 } else { 4617 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 4618 cp->direction |= IOACCEL2_DIR_NO_DATA; 4619 } 4620 4621 /* Set encryption parameters, if necessary */ 4622 set_encrypt_ioaccel2(h, c, cp); 4623 4624 cp->scsi_nexus = cpu_to_le32(ioaccel_handle); 4625 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT); 4626 memcpy(cp->cdb, cdb, sizeof(cp->cdb)); 4627 4628 cp->data_len = 
cpu_to_le32(total_len); 4629 cp->err_ptr = cpu_to_le64(c->busaddr + 4630 offsetof(struct io_accel2_cmd, error_data)); 4631 cp->err_len = cpu_to_le32(sizeof(cp->error_data)); 4632 4633 /* fill in sg elements */ 4634 if (use_sg > h->ioaccel_maxsg) { 4635 cp->sg_count = 1; 4636 cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0])); 4637 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) { 4638 atomic_dec(&phys_disk->ioaccel_cmds_out); 4639 scsi_dma_unmap(cmd); 4640 return -1; 4641 } 4642 } else 4643 cp->sg_count = (u8) use_sg; 4644 4645 enqueue_cmd_and_start_io(h, c); 4646 return 0; 4647 } 4648 4649 /* 4650 * Queue a command to the correct I/O accelerator path. 4651 */ 4652 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, 4653 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 4654 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) 4655 { 4656 /* Try to honor the device's queue depth */ 4657 if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) > 4658 phys_disk->queue_depth) { 4659 atomic_dec(&phys_disk->ioaccel_cmds_out); 4660 return IO_ACCEL_INELIGIBLE; 4661 } 4662 if (h->transMethod & CFGTBL_Trans_io_accel1) 4663 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle, 4664 cdb, cdb_len, scsi3addr, 4665 phys_disk); 4666 else 4667 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle, 4668 cdb, cdb_len, scsi3addr, 4669 phys_disk); 4670 } 4671 4672 static void raid_map_helper(struct raid_map_data *map, 4673 int offload_to_mirror, u32 *map_index, u32 *current_group) 4674 { 4675 if (offload_to_mirror == 0) { 4676 /* use physical disk in the first mirrored group. */ 4677 *map_index %= le16_to_cpu(map->data_disks_per_row); 4678 return; 4679 } 4680 do { 4681 /* determine mirror group that *map_index indicates */ 4682 *current_group = *map_index / 4683 le16_to_cpu(map->data_disks_per_row); 4684 if (offload_to_mirror == *current_group) 4685 continue; 4686 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) { 4687 /* select map index from next group */ 4688 *map_index += le16_to_cpu(map->data_disks_per_row); 4689 (*current_group)++; 4690 } else { 4691 /* select map index from first group */ 4692 *map_index %= le16_to_cpu(map->data_disks_per_row); 4693 *current_group = 0; 4694 } 4695 } while (offload_to_mirror != *current_group); 4696 } 4697 4698 /* 4699 * Attempt to perform offload RAID mapping for a logical volume I/O. 
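 *
 * (Worked example with an assumed geometry: strip_size = 128 and
 * data_disks_per_row = 3 give blocks_per_row = 384.  A READ_10 at
 * first_block = 500 for 8 blocks yields first_row = last_row = 1,
 * first_row_offset = 116 and first_column = last_column = 0, so
 * the request fits a single strip and can be issued directly to
 * the member disk; anything crossing a row or column boundary is
 * handed back as IO_ACCEL_INELIGIBLE for the normal RAID path.)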
4700 */ 4701 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, 4702 struct CommandList *c) 4703 { 4704 struct scsi_cmnd *cmd = c->scsi_cmd; 4705 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 4706 struct raid_map_data *map = &dev->raid_map; 4707 struct raid_map_disk_data *dd = &map->data[0]; 4708 int is_write = 0; 4709 u32 map_index; 4710 u64 first_block, last_block; 4711 u32 block_cnt; 4712 u32 blocks_per_row; 4713 u64 first_row, last_row; 4714 u32 first_row_offset, last_row_offset; 4715 u32 first_column, last_column; 4716 u64 r0_first_row, r0_last_row; 4717 u32 r5or6_blocks_per_row; 4718 u64 r5or6_first_row, r5or6_last_row; 4719 u32 r5or6_first_row_offset, r5or6_last_row_offset; 4720 u32 r5or6_first_column, r5or6_last_column; 4721 u32 total_disks_per_row; 4722 u32 stripesize; 4723 u32 first_group, last_group, current_group; 4724 u32 map_row; 4725 u32 disk_handle; 4726 u64 disk_block; 4727 u32 disk_block_cnt; 4728 u8 cdb[16]; 4729 u8 cdb_len; 4730 u16 strip_size; 4731 #if BITS_PER_LONG == 32 4732 u64 tmpdiv; 4733 #endif 4734 int offload_to_mirror; 4735 4736 /* check for valid opcode, get LBA and block count */ 4737 switch (cmd->cmnd[0]) { 4738 case WRITE_6: 4739 is_write = 1; 4740 case READ_6: 4741 first_block = get_unaligned_be16(&cmd->cmnd[2]); 4742 block_cnt = cmd->cmnd[4]; 4743 if (block_cnt == 0) 4744 block_cnt = 256; 4745 break; 4746 case WRITE_10: 4747 is_write = 1; 4748 case READ_10: 4749 first_block = 4750 (((u64) cmd->cmnd[2]) << 24) | 4751 (((u64) cmd->cmnd[3]) << 16) | 4752 (((u64) cmd->cmnd[4]) << 8) | 4753 cmd->cmnd[5]; 4754 block_cnt = 4755 (((u32) cmd->cmnd[7]) << 8) | 4756 cmd->cmnd[8]; 4757 break; 4758 case WRITE_12: 4759 is_write = 1; 4760 case READ_12: 4761 first_block = 4762 (((u64) cmd->cmnd[2]) << 24) | 4763 (((u64) cmd->cmnd[3]) << 16) | 4764 (((u64) cmd->cmnd[4]) << 8) | 4765 cmd->cmnd[5]; 4766 block_cnt = 4767 (((u32) cmd->cmnd[6]) << 24) | 4768 (((u32) cmd->cmnd[7]) << 16) | 4769 (((u32) cmd->cmnd[8]) << 8) | 4770 cmd->cmnd[9]; 4771 break; 4772 case WRITE_16: 4773 is_write = 1; 4774 case READ_16: 4775 first_block = 4776 (((u64) cmd->cmnd[2]) << 56) | 4777 (((u64) cmd->cmnd[3]) << 48) | 4778 (((u64) cmd->cmnd[4]) << 40) | 4779 (((u64) cmd->cmnd[5]) << 32) | 4780 (((u64) cmd->cmnd[6]) << 24) | 4781 (((u64) cmd->cmnd[7]) << 16) | 4782 (((u64) cmd->cmnd[8]) << 8) | 4783 cmd->cmnd[9]; 4784 block_cnt = 4785 (((u32) cmd->cmnd[10]) << 24) | 4786 (((u32) cmd->cmnd[11]) << 16) | 4787 (((u32) cmd->cmnd[12]) << 8) | 4788 cmd->cmnd[13]; 4789 break; 4790 default: 4791 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */ 4792 } 4793 last_block = first_block + block_cnt - 1; 4794 4795 /* check for write to non-RAID-0 */ 4796 if (is_write && dev->raid_level != 0) 4797 return IO_ACCEL_INELIGIBLE; 4798 4799 /* check for invalid block or wraparound */ 4800 if (last_block >= le64_to_cpu(map->volume_blk_cnt) || 4801 last_block < first_block) 4802 return IO_ACCEL_INELIGIBLE; 4803 4804 /* calculate stripe information for the request */ 4805 blocks_per_row = le16_to_cpu(map->data_disks_per_row) * 4806 le16_to_cpu(map->strip_size); 4807 strip_size = le16_to_cpu(map->strip_size); 4808 #if BITS_PER_LONG == 32 4809 tmpdiv = first_block; 4810 (void) do_div(tmpdiv, blocks_per_row); 4811 first_row = tmpdiv; 4812 tmpdiv = last_block; 4813 (void) do_div(tmpdiv, blocks_per_row); 4814 last_row = tmpdiv; 4815 first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); 4816 last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); 4817 tmpdiv = 
first_row_offset; 4818 (void) do_div(tmpdiv, strip_size); 4819 first_column = tmpdiv; 4820 tmpdiv = last_row_offset; 4821 (void) do_div(tmpdiv, strip_size); 4822 last_column = tmpdiv; 4823 #else 4824 first_row = first_block / blocks_per_row; 4825 last_row = last_block / blocks_per_row; 4826 first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); 4827 last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); 4828 first_column = first_row_offset / strip_size; 4829 last_column = last_row_offset / strip_size; 4830 #endif 4831 4832 /* if this isn't a single row/column then give to the controller */ 4833 if ((first_row != last_row) || (first_column != last_column)) 4834 return IO_ACCEL_INELIGIBLE; 4835 4836 /* proceeding with driver mapping */ 4837 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) + 4838 le16_to_cpu(map->metadata_disks_per_row); 4839 map_row = ((u32)(first_row >> map->parity_rotation_shift)) % 4840 le16_to_cpu(map->row_cnt); 4841 map_index = (map_row * total_disks_per_row) + first_column; 4842 4843 switch (dev->raid_level) { 4844 case HPSA_RAID_0: 4845 break; /* nothing special to do */ 4846 case HPSA_RAID_1: 4847 /* Handles load balance across RAID 1 members. 4848 * (2-drive R1 and R10 with even # of drives.) 4849 * Appropriate for SSDs, not optimal for HDDs. 4850 */ 4851 BUG_ON(le16_to_cpu(map->layout_map_count) != 2); 4852 if (dev->offload_to_mirror) 4853 map_index += le16_to_cpu(map->data_disks_per_row); 4854 dev->offload_to_mirror = !dev->offload_to_mirror; 4855 break; 4856 case HPSA_RAID_ADM: 4857 /* Handles N-way mirrors (R1-ADM) 4858 * and R10 with # of drives divisible by 3. 4859 */ 4860 BUG_ON(le16_to_cpu(map->layout_map_count) != 3); 4861 4862 offload_to_mirror = dev->offload_to_mirror; 4863 raid_map_helper(map, offload_to_mirror, 4864 &map_index, &current_group); 4865 /* set mirror group to use next time */ 4866 offload_to_mirror = 4867 (offload_to_mirror >= 4868 le16_to_cpu(map->layout_map_count) - 1) 4869 ? 0 : offload_to_mirror + 1; 4870 dev->offload_to_mirror = offload_to_mirror; 4871 /* Avoid direct use of dev->offload_to_mirror within this 4872 * function since multiple threads might simultaneously 4873 * increment it beyond the range of dev->layout_map_count - 1.
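 * (Concretely, with layout_map_count == 3 the snapshot makes the
 * rotation a clean 0 -> 1 -> 2 -> 0 cycle over successive
 * commands: each command reads dev->offload_to_mirror once, picks
 * its mirror group from that value, and stores back the successor.)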
4874 */ 4875 break; 4876 case HPSA_RAID_5: 4877 case HPSA_RAID_6: 4878 if (le16_to_cpu(map->layout_map_count) <= 1) 4879 break; 4880 4881 /* Verify first and last block are in same RAID group */ 4882 r5or6_blocks_per_row = 4883 le16_to_cpu(map->strip_size) * 4884 le16_to_cpu(map->data_disks_per_row); 4885 BUG_ON(r5or6_blocks_per_row == 0); 4886 stripesize = r5or6_blocks_per_row * 4887 le16_to_cpu(map->layout_map_count); 4888 #if BITS_PER_LONG == 32 4889 tmpdiv = first_block; 4890 first_group = do_div(tmpdiv, stripesize); 4891 tmpdiv = first_group; 4892 (void) do_div(tmpdiv, r5or6_blocks_per_row); 4893 first_group = tmpdiv; 4894 tmpdiv = last_block; 4895 last_group = do_div(tmpdiv, stripesize); 4896 tmpdiv = last_group; 4897 (void) do_div(tmpdiv, r5or6_blocks_per_row); 4898 last_group = tmpdiv; 4899 #else 4900 first_group = (first_block % stripesize) / r5or6_blocks_per_row; 4901 last_group = (last_block % stripesize) / r5or6_blocks_per_row; 4902 #endif 4903 if (first_group != last_group) 4904 return IO_ACCEL_INELIGIBLE; 4905 4906 /* Verify request is in a single row of RAID 5/6 */ 4907 #if BITS_PER_LONG == 32 4908 tmpdiv = first_block; 4909 (void) do_div(tmpdiv, stripesize); 4910 first_row = r5or6_first_row = r0_first_row = tmpdiv; 4911 tmpdiv = last_block; 4912 (void) do_div(tmpdiv, stripesize); 4913 r5or6_last_row = r0_last_row = tmpdiv; 4914 #else 4915 first_row = r5or6_first_row = r0_first_row = 4916 first_block / stripesize; 4917 r5or6_last_row = r0_last_row = last_block / stripesize; 4918 #endif 4919 if (r5or6_first_row != r5or6_last_row) 4920 return IO_ACCEL_INELIGIBLE; 4921 4922 4923 /* Verify request is in a single column */ 4924 #if BITS_PER_LONG == 32 4925 tmpdiv = first_block; 4926 first_row_offset = do_div(tmpdiv, stripesize); 4927 tmpdiv = first_row_offset; 4928 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row); 4929 r5or6_first_row_offset = first_row_offset; 4930 tmpdiv = last_block; 4931 r5or6_last_row_offset = do_div(tmpdiv, stripesize); 4932 tmpdiv = r5or6_last_row_offset; 4933 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); 4934 tmpdiv = r5or6_first_row_offset; 4935 (void) do_div(tmpdiv, map->strip_size); 4936 first_column = r5or6_first_column = tmpdiv; 4937 tmpdiv = r5or6_last_row_offset; 4938 (void) do_div(tmpdiv, map->strip_size); 4939 r5or6_last_column = tmpdiv; 4940 #else 4941 first_row_offset = r5or6_first_row_offset = 4942 (u32)((first_block % stripesize) % 4943 r5or6_blocks_per_row); 4944 4945 r5or6_last_row_offset = 4946 (u32)((last_block % stripesize) % 4947 r5or6_blocks_per_row); 4948 4949 first_column = r5or6_first_column = 4950 r5or6_first_row_offset / le16_to_cpu(map->strip_size); 4951 r5or6_last_column = 4952 r5or6_last_row_offset / le16_to_cpu(map->strip_size); 4953 #endif 4954 if (r5or6_first_column != r5or6_last_column) 4955 return IO_ACCEL_INELIGIBLE; 4956 4957 /* Request is eligible */ 4958 map_row = ((u32)(first_row >> map->parity_rotation_shift)) % 4959 le16_to_cpu(map->row_cnt); 4960 4961 map_index = (first_group * 4962 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) + 4963 (map_row * total_disks_per_row) + first_column; 4964 break; 4965 default: 4966 return IO_ACCEL_INELIGIBLE; 4967 } 4968 4969 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES)) 4970 return IO_ACCEL_INELIGIBLE; 4971 4972 c->phys_disk = dev->phys_disk[map_index]; 4973 if (!c->phys_disk) 4974 return IO_ACCEL_INELIGIBLE; 4975 4976 disk_handle = dd[map_index].ioaccel_handle; 4977 disk_block = le64_to_cpu(map->disk_starting_blk) + 4978 first_row * 
le16_to_cpu(map->strip_size) + 4979 (first_row_offset - first_column * 4980 le16_to_cpu(map->strip_size)); 4981 disk_block_cnt = block_cnt; 4982 4983 /* handle differing logical/physical block sizes */ 4984 if (map->phys_blk_shift) { 4985 disk_block <<= map->phys_blk_shift; 4986 disk_block_cnt <<= map->phys_blk_shift; 4987 } 4988 BUG_ON(disk_block_cnt > 0xffff); 4989 4990 /* build the new CDB for the physical disk I/O */ 4991 if (disk_block > 0xffffffff) { 4992 cdb[0] = is_write ? WRITE_16 : READ_16; 4993 cdb[1] = 0; 4994 cdb[2] = (u8) (disk_block >> 56); 4995 cdb[3] = (u8) (disk_block >> 48); 4996 cdb[4] = (u8) (disk_block >> 40); 4997 cdb[5] = (u8) (disk_block >> 32); 4998 cdb[6] = (u8) (disk_block >> 24); 4999 cdb[7] = (u8) (disk_block >> 16); 5000 cdb[8] = (u8) (disk_block >> 8); 5001 cdb[9] = (u8) (disk_block); 5002 cdb[10] = (u8) (disk_block_cnt >> 24); 5003 cdb[11] = (u8) (disk_block_cnt >> 16); 5004 cdb[12] = (u8) (disk_block_cnt >> 8); 5005 cdb[13] = (u8) (disk_block_cnt); 5006 cdb[14] = 0; 5007 cdb[15] = 0; 5008 cdb_len = 16; 5009 } else { 5010 cdb[0] = is_write ? WRITE_10 : READ_10; 5011 cdb[1] = 0; 5012 cdb[2] = (u8) (disk_block >> 24); 5013 cdb[3] = (u8) (disk_block >> 16); 5014 cdb[4] = (u8) (disk_block >> 8); 5015 cdb[5] = (u8) (disk_block); 5016 cdb[6] = 0; 5017 cdb[7] = (u8) (disk_block_cnt >> 8); 5018 cdb[8] = (u8) (disk_block_cnt); 5019 cdb[9] = 0; 5020 cdb_len = 10; 5021 } 5022 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len, 5023 dev->scsi3addr, 5024 dev->phys_disk[map_index]); 5025 } 5026 5027 /* 5028 * Submit commands down the "normal" RAID stack path 5029 * All callers to hpsa_ciss_submit must check lockup_detected 5030 * beforehand, before (opt.) and after calling cmd_alloc 5031 */ 5032 static int hpsa_ciss_submit(struct ctlr_info *h, 5033 struct CommandList *c, struct scsi_cmnd *cmd, 5034 unsigned char scsi3addr[]) 5035 { 5036 cmd->host_scribble = (unsigned char *) c; 5037 c->cmd_type = CMD_SCSI; 5038 c->scsi_cmd = cmd; 5039 c->Header.ReplyQueue = 0; /* unused in simple mode */ 5040 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); 5041 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT)); 5042 5043 /* Fill in the request block... */ 5044 5045 c->Request.Timeout = 0; 5046 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); 5047 c->Request.CDBLen = cmd->cmd_len; 5048 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); 5049 switch (cmd->sc_data_direction) { 5050 case DMA_TO_DEVICE: 5051 c->Request.type_attr_dir = 5052 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE); 5053 break; 5054 case DMA_FROM_DEVICE: 5055 c->Request.type_attr_dir = 5056 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ); 5057 break; 5058 case DMA_NONE: 5059 c->Request.type_attr_dir = 5060 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE); 5061 break; 5062 case DMA_BIDIRECTIONAL: 5063 /* This can happen if a buggy application does a scsi passthru 5064 * and sets both inlen and outlen to non-zero. ( see 5065 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) 5066 */ 5067 5068 c->Request.type_attr_dir = 5069 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD); 5070 /* This is technically wrong, and hpsa controllers should 5071 * reject it with CMD_INVALID, which is the most correct 5072 * response, but non-fibre backends appear to let it 5073 * slide by, and give the same results as if this field 5074 * were set correctly. Either way is acceptable for 5075 * our purposes here. 
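 * (For reference, TYPE_ATTR_DIR() above packs the command type,
 * queue attribute and transfer direction into the request's
 * type_attr_dir field; XFER_RSVD is the "reserved direction"
 * encoding, chosen here because neither XFER_READ nor XFER_WRITE
 * fits a bidirectional passthru.)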
5076 */ 5077 5078 break; 5079 5080 default: 5081 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 5082 cmd->sc_data_direction); 5083 BUG(); 5084 break; 5085 } 5086 5087 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ 5088 hpsa_cmd_resolve_and_free(h, c); 5089 return SCSI_MLQUEUE_HOST_BUSY; 5090 } 5091 enqueue_cmd_and_start_io(h, c); 5092 /* the cmd'll come back via intr handler in complete_scsi_command() */ 5093 return 0; 5094 } 5095 5096 static void hpsa_cmd_init(struct ctlr_info *h, int index, 5097 struct CommandList *c) 5098 { 5099 dma_addr_t cmd_dma_handle, err_dma_handle; 5100 5101 /* Zero out all of commandlist except the last field, refcount */ 5102 memset(c, 0, offsetof(struct CommandList, refcount)); 5103 c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT)); 5104 cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c); 5105 c->err_info = h->errinfo_pool + index; 5106 memset(c->err_info, 0, sizeof(*c->err_info)); 5107 err_dma_handle = h->errinfo_pool_dhandle 5108 + index * sizeof(*c->err_info); 5109 c->cmdindex = index; 5110 c->busaddr = (u32) cmd_dma_handle; 5111 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle); 5112 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info)); 5113 c->h = h; 5114 c->scsi_cmd = SCSI_CMD_IDLE; 5115 } 5116 5117 static void hpsa_preinitialize_commands(struct ctlr_info *h) 5118 { 5119 int i; 5120 5121 for (i = 0; i < h->nr_cmds; i++) { 5122 struct CommandList *c = h->cmd_pool + i; 5123 5124 hpsa_cmd_init(h, i, c); 5125 atomic_set(&c->refcount, 0); 5126 } 5127 } 5128 5129 static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index, 5130 struct CommandList *c) 5131 { 5132 dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c); 5133 5134 BUG_ON(c->cmdindex != index); 5135 5136 memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); 5137 memset(c->err_info, 0, sizeof(*c->err_info)); 5138 c->busaddr = (u32) cmd_dma_handle; 5139 } 5140 5141 static int hpsa_ioaccel_submit(struct ctlr_info *h, 5142 struct CommandList *c, struct scsi_cmnd *cmd, 5143 unsigned char *scsi3addr) 5144 { 5145 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 5146 int rc = IO_ACCEL_INELIGIBLE; 5147 5148 cmd->host_scribble = (unsigned char *) c; 5149 5150 if (dev->offload_enabled) { 5151 hpsa_cmd_init(h, c->cmdindex, c); 5152 c->cmd_type = CMD_SCSI; 5153 c->scsi_cmd = cmd; 5154 rc = hpsa_scsi_ioaccel_raid_map(h, c); 5155 if (rc < 0) /* scsi_dma_map failed. */ 5156 rc = SCSI_MLQUEUE_HOST_BUSY; 5157 } else if (dev->hba_ioaccel_enabled) { 5158 hpsa_cmd_init(h, c->cmdindex, c); 5159 c->cmd_type = CMD_SCSI; 5160 c->scsi_cmd = cmd; 5161 rc = hpsa_scsi_ioaccel_direct_map(h, c); 5162 if (rc < 0) /* scsi_dma_map failed. 
*/ 5163 rc = SCSI_MLQUEUE_HOST_BUSY; 5164 } 5165 return rc; 5166 } 5167 5168 static void hpsa_command_resubmit_worker(struct work_struct *work) 5169 { 5170 struct scsi_cmnd *cmd; 5171 struct hpsa_scsi_dev_t *dev; 5172 struct CommandList *c = container_of(work, struct CommandList, work); 5173 5174 cmd = c->scsi_cmd; 5175 dev = cmd->device->hostdata; 5176 if (!dev) { 5177 cmd->result = DID_NO_CONNECT << 16; 5178 return hpsa_cmd_free_and_done(c->h, c, cmd); 5179 } 5180 if (c->reset_pending) 5181 return hpsa_cmd_resolve_and_free(c->h, c); 5182 if (c->abort_pending) 5183 return hpsa_cmd_abort_and_free(c->h, c, cmd); 5184 if (c->cmd_type == CMD_IOACCEL2) { 5185 struct ctlr_info *h = c->h; 5186 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; 5187 int rc; 5188 5189 if (c2->error_data.serv_response == 5190 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) { 5191 rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr); 5192 if (rc == 0) 5193 return; 5194 if (rc == SCSI_MLQUEUE_HOST_BUSY) { 5195 /* 5196 * If we get here, it means dma mapping failed. 5197 * Try again via scsi mid layer, which will 5198 * then get SCSI_MLQUEUE_HOST_BUSY. 5199 */ 5200 cmd->result = DID_IMM_RETRY << 16; 5201 return hpsa_cmd_free_and_done(h, c, cmd); 5202 } 5203 /* else, fall thru and resubmit down CISS path */ 5204 } 5205 } 5206 hpsa_cmd_partial_init(c->h, c->cmdindex, c); 5207 if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) { 5208 /* 5209 * If we get here, it means dma mapping failed. Try 5210 * again via scsi mid layer, which will then get 5211 * SCSI_MLQUEUE_HOST_BUSY. 5212 * 5213 * hpsa_ciss_submit will have already freed c 5214 * if it encountered a dma mapping failure. 5215 */ 5216 cmd->result = DID_IMM_RETRY << 16; 5217 cmd->scsi_done(cmd); 5218 } 5219 } 5220 5221 /* Running in struct Scsi_Host->host_lock less mode */ 5222 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) 5223 { 5224 struct ctlr_info *h; 5225 struct hpsa_scsi_dev_t *dev; 5226 unsigned char scsi3addr[8]; 5227 struct CommandList *c; 5228 int rc = 0; 5229 5230 /* Get the ptr to our adapter structure out of cmd->host. */ 5231 h = sdev_to_hba(cmd->device); 5232 5233 BUG_ON(cmd->request->tag < 0); 5234 5235 dev = cmd->device->hostdata; 5236 if (!dev) { 5237 cmd->result = DID_NO_CONNECT << 16; 5238 cmd->scsi_done(cmd); 5239 return 0; 5240 } 5241 5242 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); 5243 5244 if (unlikely(lockup_detected(h))) { 5245 cmd->result = DID_NO_CONNECT << 16; 5246 cmd->scsi_done(cmd); 5247 return 0; 5248 } 5249 c = cmd_tagged_alloc(h, cmd); 5250 5251 /* 5252 * Call alternate submit routine for I/O accelerated commands. 5253 * Retries always go down the normal I/O path. 
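 * (The gate below is deliberately narrow: cmd->retries == 0 (first
 * submission only), REQ_TYPE_FS (filesystem I/O, not a passthru)
 * and h->acciopath_status set.  A command that previously failed
 * on the accelerated path therefore re-enters with retries > 0 and
 * goes through hpsa_ciss_submit() instead of bouncing on ioaccel.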
5254 */ 5255 if (likely(cmd->retries == 0 && 5256 cmd->request->cmd_type == REQ_TYPE_FS && 5257 h->acciopath_status)) { 5258 rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr); 5259 if (rc == 0) 5260 return 0; 5261 if (rc == SCSI_MLQUEUE_HOST_BUSY) { 5262 hpsa_cmd_resolve_and_free(h, c); 5263 return SCSI_MLQUEUE_HOST_BUSY; 5264 } 5265 } 5266 return hpsa_ciss_submit(h, c, cmd, scsi3addr); 5267 } 5268 5269 static void hpsa_scan_complete(struct ctlr_info *h) 5270 { 5271 unsigned long flags; 5272 5273 spin_lock_irqsave(&h->scan_lock, flags); 5274 h->scan_finished = 1; 5275 wake_up_all(&h->scan_wait_queue); 5276 spin_unlock_irqrestore(&h->scan_lock, flags); 5277 } 5278 5279 static void hpsa_scan_start(struct Scsi_Host *sh) 5280 { 5281 struct ctlr_info *h = shost_to_hba(sh); 5282 unsigned long flags; 5283 5284 /* 5285 * Don't let rescans be initiated on a controller known to be locked 5286 * up. If the controller locks up *during* a rescan, that thread is 5287 * probably hosed, but at least we can prevent new rescan threads from 5288 * piling up on a locked up controller. 5289 */ 5290 if (unlikely(lockup_detected(h))) 5291 return hpsa_scan_complete(h); 5292 5293 /* wait until any scan already in progress is finished. */ 5294 while (1) { 5295 spin_lock_irqsave(&h->scan_lock, flags); 5296 if (h->scan_finished) 5297 break; 5298 spin_unlock_irqrestore(&h->scan_lock, flags); 5299 wait_event(h->scan_wait_queue, h->scan_finished); 5300 /* Note: We don't need to worry about a race between this 5301 * thread and driver unload because the midlayer will 5302 * have incremented the reference count, so unload won't 5303 * happen if we're in here. 5304 */ 5305 } 5306 h->scan_finished = 0; /* mark scan as in progress */ 5307 spin_unlock_irqrestore(&h->scan_lock, flags); 5308 5309 if (unlikely(lockup_detected(h))) 5310 return hpsa_scan_complete(h); 5311 5312 hpsa_update_scsi_devices(h); 5313 5314 hpsa_scan_complete(h); 5315 } 5316 5317 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth) 5318 { 5319 struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata; 5320 5321 if (!logical_drive) 5322 return -ENODEV; 5323 5324 if (qdepth < 1) 5325 qdepth = 1; 5326 else if (qdepth > logical_drive->queue_depth) 5327 qdepth = logical_drive->queue_depth; 5328 5329 return scsi_change_queue_depth(sdev, qdepth); 5330 } 5331 5332 static int hpsa_scan_finished(struct Scsi_Host *sh, 5333 unsigned long elapsed_time) 5334 { 5335 struct ctlr_info *h = shost_to_hba(sh); 5336 unsigned long flags; 5337 int finished; 5338 5339 spin_lock_irqsave(&h->scan_lock, flags); 5340 finished = h->scan_finished; 5341 spin_unlock_irqrestore(&h->scan_lock, flags); 5342 return finished; 5343 } 5344 5345 static int hpsa_scsi_host_alloc(struct ctlr_info *h) 5346 { 5347 struct Scsi_Host *sh; 5348 5349 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); 5350 if (sh == NULL) { 5351 dev_err(&h->pdev->dev, "scsi_host_alloc failed\n"); 5352 return -ENOMEM; 5353 } 5354 5355 sh->io_port = 0; 5356 sh->n_io_port = 0; 5357 sh->this_id = -1; 5358 sh->max_channel = 3; 5359 sh->max_cmd_len = MAX_COMMAND_SIZE; 5360 sh->max_lun = HPSA_MAX_LUN; 5361 sh->max_id = HPSA_MAX_LUN; 5362 sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS; 5363 sh->cmd_per_lun = sh->can_queue; 5364 sh->sg_tablesize = h->maxsgentries; 5365 sh->transportt = hpsa_sas_transport_template; 5366 sh->hostdata[0] = (unsigned long) h; 5367 sh->irq = h->intr[h->intr_mode]; 5368 sh->unique_id = sh->irq; 5369 5370 h->scsi_host = sh; 5371 return 0; 5372 } 5373 5374 static int 
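 * (Exponential backoff: the sleeps run 1, 2, 4, 8, ... seconds,
 * doubling until HPSA_MAX_WAIT_INTERVAL_SECS caps the interval,
 * for at most HPSA_TUR_RETRY_LIMIT tries in total.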
hpsa_scsi_add_host(struct ctlr_info *h) 5375 { 5376 int rv; 5377 5378 rv = scsi_add_host(h->scsi_host, &h->pdev->dev); 5379 if (rv) { 5380 dev_err(&h->pdev->dev, "scsi_add_host failed\n"); 5381 return rv; 5382 } 5383 scsi_scan_host(h->scsi_host); 5384 return 0; 5385 } 5386 5387 /* 5388 * The block layer has already gone to the trouble of picking out a unique, 5389 * small-integer tag for this request. We use an offset from that value as 5390 * an index to select our command block. (The offset allows us to reserve the 5391 * low-numbered entries for our own uses.) 5392 */ 5393 static int hpsa_get_cmd_index(struct scsi_cmnd *scmd) 5394 { 5395 int idx = scmd->request->tag; 5396 5397 if (idx < 0) 5398 return idx; 5399 5400 /* Offset to leave space for internal cmds. */ 5401 return idx += HPSA_NRESERVED_CMDS; 5402 } 5403 5404 /* 5405 * Send a TEST_UNIT_READY command to the specified LUN using the specified 5406 * reply queue; returns zero if the unit is ready, and non-zero otherwise. 5407 */ 5408 static int hpsa_send_test_unit_ready(struct ctlr_info *h, 5409 struct CommandList *c, unsigned char lunaddr[], 5410 int reply_queue) 5411 { 5412 int rc; 5413 5414 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ 5415 (void) fill_cmd(c, TEST_UNIT_READY, h, 5416 NULL, 0, 0, lunaddr, TYPE_CMD); 5417 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); 5418 if (rc) 5419 return rc; 5420 /* no unmap needed here because no data xfer. */ 5421 5422 /* Check if the unit is already ready. */ 5423 if (c->err_info->CommandStatus == CMD_SUCCESS) 5424 return 0; 5425 5426 /* 5427 * The first command sent after reset will receive "unit attention" to 5428 * indicate that the LUN has been reset...this is actually what we're 5429 * looking for (but, success is good too). 5430 */ 5431 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 5432 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && 5433 (c->err_info->SenseInfo[2] == NO_SENSE || 5434 c->err_info->SenseInfo[2] == UNIT_ATTENTION)) 5435 return 0; 5436 5437 return 1; 5438 } 5439 5440 /* 5441 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary; 5442 * returns zero when the unit is ready, and non-zero when giving up. 5443 */ 5444 static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h, 5445 struct CommandList *c, 5446 unsigned char lunaddr[], int reply_queue) 5447 { 5448 int rc; 5449 int count = 0; 5450 int waittime = 1; /* seconds */ 5451 5452 /* Send test unit ready until device ready, or give up. */ 5453 for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) { 5454 5455 /* 5456 * Wait for a bit. do this first, because if we send 5457 * the TUR right away, the reset will just abort it. 5458 */ 5459 msleep(1000 * waittime); 5460 5461 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue); 5462 if (!rc) 5463 break; 5464 5465 /* Increase wait time with each try, up to a point. 
*/ 5466 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) 5467 waittime *= 2; 5468 5469 dev_warn(&h->pdev->dev, 5470 "waiting %d secs for device to become ready.\n", 5471 waittime); 5472 } 5473 5474 return rc; 5475 } 5476 5477 static int wait_for_device_to_become_ready(struct ctlr_info *h, 5478 unsigned char lunaddr[], 5479 int reply_queue) 5480 { 5481 int first_queue; 5482 int last_queue; 5483 int rq; 5484 int rc = 0; 5485 struct CommandList *c; 5486 5487 c = cmd_alloc(h); 5488 5489 /* 5490 * If no specific reply queue was requested, then send the TUR 5491 * repeatedly, requesting a reply on each reply queue; otherwise execute 5492 * the loop exactly once using only the specified queue. 5493 */ 5494 if (reply_queue == DEFAULT_REPLY_QUEUE) { 5495 first_queue = 0; 5496 last_queue = h->nreply_queues - 1; 5497 } else { 5498 first_queue = reply_queue; 5499 last_queue = reply_queue; 5500 } 5501 5502 for (rq = first_queue; rq <= last_queue; rq++) { 5503 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq); 5504 if (rc) 5505 break; 5506 } 5507 5508 if (rc) 5509 dev_warn(&h->pdev->dev, "giving up on device.\n"); 5510 else 5511 dev_warn(&h->pdev->dev, "device is ready.\n"); 5512 5513 cmd_free(h, c); 5514 return rc; 5515 } 5516 5517 /* Need at least one of these error handlers to keep ../scsi/hosts.c from 5518 * complaining. Doing a host- or bus-reset can't do anything good here. 5519 */ 5520 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) 5521 { 5522 int rc; 5523 struct ctlr_info *h; 5524 struct hpsa_scsi_dev_t *dev; 5525 u8 reset_type; 5526 char msg[48]; 5527 5528 /* find the controller to which the command to be aborted was sent */ 5529 h = sdev_to_hba(scsicmd->device); 5530 if (h == NULL) /* paranoia */ 5531 return FAILED; 5532 5533 if (lockup_detected(h)) 5534 return FAILED; 5535 5536 dev = scsicmd->device->hostdata; 5537 if (!dev) { 5538 dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__); 5539 return FAILED; 5540 } 5541 5542 /* if controller locked up, we can guarantee command won't complete */ 5543 if (lockup_detected(h)) { 5544 snprintf(msg, sizeof(msg), 5545 "cmd %d RESET FAILED, lockup detected", 5546 hpsa_get_cmd_index(scsicmd)); 5547 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); 5548 return FAILED; 5549 } 5550 5551 /* this reset request might be the result of a lockup; check */ 5552 if (detect_controller_lockup(h)) { 5553 snprintf(msg, sizeof(msg), 5554 "cmd %d RESET FAILED, new lockup detected", 5555 hpsa_get_cmd_index(scsicmd)); 5556 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); 5557 return FAILED; 5558 } 5559 5560 /* Do not attempt on controller */ 5561 if (is_hba_lunid(dev->scsi3addr)) 5562 return SUCCESS; 5563 5564 if (is_logical_dev_addr_mode(dev->scsi3addr)) 5565 reset_type = HPSA_DEVICE_RESET_MSG; 5566 else 5567 reset_type = HPSA_PHYS_TARGET_RESET; 5568 5569 sprintf(msg, "resetting %s", 5570 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical "); 5571 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); 5572 5573 h->reset_in_progress = 1; 5574 5575 /* send a reset to the SCSI LUN which the command was sent to */ 5576 rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type, 5577 DEFAULT_REPLY_QUEUE); 5578 sprintf(msg, "reset %s %s", 5579 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ", 5580 rc == 0 ? "completed successfully" : "failed"); 5581 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); 5582 h->reset_in_progress = 0; 5583 return rc == 0 ? 
SUCCESS : FAILED; 5584 } 5585 5586 static void swizzle_abort_tag(u8 *tag) 5587 { 5588 u8 original_tag[8]; 5589 5590 memcpy(original_tag, tag, 8); 5591 tag[0] = original_tag[3]; 5592 tag[1] = original_tag[2]; 5593 tag[2] = original_tag[1]; 5594 tag[3] = original_tag[0]; 5595 tag[4] = original_tag[7]; 5596 tag[5] = original_tag[6]; 5597 tag[6] = original_tag[5]; 5598 tag[7] = original_tag[4]; 5599 } 5600 5601 static void hpsa_get_tag(struct ctlr_info *h, 5602 struct CommandList *c, __le32 *taglower, __le32 *tagupper) 5603 { 5604 u64 tag; 5605 if (c->cmd_type == CMD_IOACCEL1) { 5606 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *) 5607 &h->ioaccel_cmd_pool[c->cmdindex]; 5608 tag = le64_to_cpu(cm1->tag); 5609 *tagupper = cpu_to_le32(tag >> 32); 5610 *taglower = cpu_to_le32(tag); 5611 return; 5612 } 5613 if (c->cmd_type == CMD_IOACCEL2) { 5614 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *) 5615 &h->ioaccel2_cmd_pool[c->cmdindex]; 5616 /* upper tag not used in ioaccel2 mode */ 5617 memset(tagupper, 0, sizeof(*tagupper)); 5618 *taglower = cm2->Tag; 5619 return; 5620 } 5621 tag = le64_to_cpu(c->Header.tag); 5622 *tagupper = cpu_to_le32(tag >> 32); 5623 *taglower = cpu_to_le32(tag); 5624 } 5625 5626 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, 5627 struct CommandList *abort, int reply_queue) 5628 { 5629 int rc = IO_OK; 5630 struct CommandList *c; 5631 struct ErrorInfo *ei; 5632 __le32 tagupper, taglower; 5633 5634 c = cmd_alloc(h); 5635 5636 /* fill_cmd can't fail here, no buffer to map */ 5637 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag, 5638 0, 0, scsi3addr, TYPE_MSG); 5639 if (h->needs_abort_tags_swizzled) 5640 swizzle_abort_tag(&c->Request.CDB[4]); 5641 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); 5642 hpsa_get_tag(h, abort, &taglower, &tagupper); 5643 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n", 5644 __func__, tagupper, taglower); 5645 /* no unmap needed here because no data xfer. */ 5646 5647 ei = c->err_info; 5648 switch (ei->CommandStatus) { 5649 case CMD_SUCCESS: 5650 break; 5651 case CMD_TMF_STATUS: 5652 rc = hpsa_evaluate_tmf_status(h, c); 5653 break; 5654 case CMD_UNABORTABLE: /* Very common, don't make noise. */ 5655 rc = -1; 5656 break; 5657 default: 5658 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n", 5659 __func__, tagupper, taglower); 5660 hpsa_scsi_interpret_error(h, c); 5661 rc = -1; 5662 break; 5663 } 5664 cmd_free(h, c); 5665 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", 5666 __func__, tagupper, taglower); 5667 return rc; 5668 } 5669 5670 static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h, 5671 struct CommandList *command_to_abort, int reply_queue) 5672 { 5673 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; 5674 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2; 5675 struct io_accel2_cmd *c2a = 5676 &h->ioaccel2_cmd_pool[command_to_abort->cmdindex]; 5677 struct scsi_cmnd *scmd = command_to_abort->scsi_cmd; 5678 struct hpsa_scsi_dev_t *dev = scmd->device->hostdata; 5679 5680 /* 5681 * We're overlaying struct hpsa_tmf_struct on top of something which 5682 * was allocated as a struct io_accel2_cmd, so we better be sure it 5683 * actually fits, and doesn't overrun the error info space. 
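 * The BUILD_BUG_ON below enforces the size relationship at compile time,
 * and the BUG_ON verifies that the TMF fields up through error_len end
 * before the io_accel2_cmd error_data area they describe begins.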
5684 */ 5685 BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) > 5686 sizeof(struct io_accel2_cmd)); 5687 BUG_ON(offsetof(struct io_accel2_cmd, error_data) < 5688 offsetof(struct hpsa_tmf_struct, error_len) + 5689 sizeof(ac->error_len)); 5690 5691 c->cmd_type = IOACCEL2_TMF; 5692 c->scsi_cmd = SCSI_CMD_BUSY; 5693 5694 /* Adjust the DMA address to point to the accelerated command buffer */ 5695 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + 5696 (c->cmdindex * sizeof(struct io_accel2_cmd)); 5697 BUG_ON(c->busaddr & 0x0000007F); 5698 5699 memset(ac, 0, sizeof(*c2)); /* yes this is correct */ 5700 ac->iu_type = IOACCEL2_IU_TMF_TYPE; 5701 ac->reply_queue = reply_queue; 5702 ac->tmf = IOACCEL2_TMF_ABORT; 5703 ac->it_nexus = cpu_to_le32(dev->ioaccel_handle); 5704 memset(ac->lun_id, 0, sizeof(ac->lun_id)); 5705 ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT); 5706 ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag)); 5707 ac->error_ptr = cpu_to_le64(c->busaddr + 5708 offsetof(struct io_accel2_cmd, error_data)); 5709 ac->error_len = cpu_to_le32(sizeof(c2->error_data)); 5710 } 5711 5712 /* ioaccel2 path firmware cannot handle abort task requests. 5713 * Change abort requests to physical target reset, and send to the 5714 * address of the physical disk used for the ioaccel 2 command. 5715 * Return 0 on success (IO_OK) 5716 * -1 on failure 5717 */ 5718 5719 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, 5720 unsigned char *scsi3addr, struct CommandList *abort, int reply_queue) 5721 { 5722 int rc = IO_OK; 5723 struct scsi_cmnd *scmd; /* scsi command within request being aborted */ 5724 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */ 5725 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */ 5726 unsigned char *psa = &phys_scsi3addr[0]; 5727 5728 /* Get a pointer to the hpsa logical device. */ 5729 scmd = abort->scsi_cmd; 5730 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata); 5731 if (dev == NULL) { 5732 dev_warn(&h->pdev->dev, 5733 "Cannot abort: no device pointer for command.\n"); 5734 return -1; /* not abortable */ 5735 } 5736 5737 if (h->raid_offload_debug > 0) 5738 dev_info(&h->pdev->dev, 5739 "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 5740 h->scsi_host->host_no, dev->bus, dev->target, dev->lun, 5741 "Reset as abort", 5742 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3], 5743 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); 5744 5745 if (!dev->offload_enabled) { 5746 dev_warn(&h->pdev->dev, 5747 "Can't abort: device is not operating in HP SSD Smart Path mode.\n"); 5748 return -1; /* not abortable */ 5749 } 5750 5751 /* Incoming scsi3addr is logical addr. We need physical disk addr. 
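hpsa_get_pdisk_of_ioaccel2() below recovers the address of the physical disk backing the outstanding ioaccel2 command.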
*/ 5752 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) { 5753 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n"); 5754 return -1; /* not abortable */ 5755 } 5756 5757 /* send the reset */ 5758 if (h->raid_offload_debug > 0) 5759 dev_info(&h->pdev->dev, 5760 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 5761 psa[0], psa[1], psa[2], psa[3], 5762 psa[4], psa[5], psa[6], psa[7]); 5763 rc = hpsa_do_reset(h, dev, psa, HPSA_RESET_TYPE_TARGET, reply_queue); 5764 if (rc != 0) { 5765 dev_warn(&h->pdev->dev, 5766 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 5767 psa[0], psa[1], psa[2], psa[3], 5768 psa[4], psa[5], psa[6], psa[7]); 5769 return rc; /* failed to reset */ 5770 } 5771 5772 /* wait for device to recover */ 5773 if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) { 5774 dev_warn(&h->pdev->dev, 5775 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 5776 psa[0], psa[1], psa[2], psa[3], 5777 psa[4], psa[5], psa[6], psa[7]); 5778 return -1; /* failed to recover */ 5779 } 5780 5781 /* device recovered */ 5782 dev_info(&h->pdev->dev, 5783 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 5784 psa[0], psa[1], psa[2], psa[3], 5785 psa[4], psa[5], psa[6], psa[7]); 5786 5787 return rc; /* success */ 5788 } 5789 5790 static int hpsa_send_abort_ioaccel2(struct ctlr_info *h, 5791 struct CommandList *abort, int reply_queue) 5792 { 5793 int rc = IO_OK; 5794 struct CommandList *c; 5795 __le32 taglower, tagupper; 5796 struct hpsa_scsi_dev_t *dev; 5797 struct io_accel2_cmd *c2; 5798 5799 dev = abort->scsi_cmd->device->hostdata; 5800 if (!dev->offload_enabled && !dev->hba_ioaccel_enabled) 5801 return -1; 5802 5803 c = cmd_alloc(h); 5804 setup_ioaccel2_abort_cmd(c, h, abort, reply_queue); 5805 c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; 5806 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); 5807 hpsa_get_tag(h, abort, &taglower, &tagupper); 5808 dev_dbg(&h->pdev->dev, 5809 "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n", 5810 __func__, tagupper, taglower); 5811 /* no unmap needed here because no data xfer. */ 5812 5813 dev_dbg(&h->pdev->dev, 5814 "%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n", 5815 __func__, tagupper, taglower, c2->error_data.serv_response); 5816 switch (c2->error_data.serv_response) { 5817 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE: 5818 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS: 5819 rc = 0; 5820 break; 5821 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED: 5822 case IOACCEL2_SERV_RESPONSE_FAILURE: 5823 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN: 5824 rc = -1; 5825 break; 5826 default: 5827 dev_warn(&h->pdev->dev, 5828 "%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n", 5829 __func__, tagupper, taglower, 5830 c2->error_data.serv_response); 5831 rc = -1; 5832 } 5833 cmd_free(h, c); 5834 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__, 5835 tagupper, taglower); 5836 return rc; 5837 } 5838 5839 static int hpsa_send_abort_both_ways(struct ctlr_info *h, 5840 struct hpsa_scsi_dev_t *dev, struct CommandList *abort, int reply_queue) 5841 { 5842 /* 5843 * ioaccel2 (io accelerator mode 2) commands should be aborted via the 5844 * accelerated path, since the RAID path is unaware of these commands, 5845 * but not all underlying firmware can handle abort TMF. 5846 * Change abort to physical device reset when abort TMF is unsupported.
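 * The decision below: for CMD_IOACCEL2, use the ioaccel2 abort when the
 * firmware advertises HPSATMF_IOACCEL_ENABLED (or the device is a physical
 * one), and fall back to reset-as-abort otherwise; every other command
 * type takes the plain CISS abort through the RAID path.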
5847 */ 5848 if (abort->cmd_type == CMD_IOACCEL2) { 5849 if ((HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags) || 5850 dev->physical_device) 5851 return hpsa_send_abort_ioaccel2(h, abort, 5852 reply_queue); 5853 else 5854 return hpsa_send_reset_as_abort_ioaccel2(h, 5855 dev->scsi3addr, 5856 abort, reply_queue); 5857 } 5858 return hpsa_send_abort(h, dev->scsi3addr, abort, reply_queue); 5859 } 5860 5861 /* Find out which reply queue a command was meant to return on */ 5862 static int hpsa_extract_reply_queue(struct ctlr_info *h, 5863 struct CommandList *c) 5864 { 5865 if (c->cmd_type == CMD_IOACCEL2) 5866 return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue; 5867 return c->Header.ReplyQueue; 5868 } 5869 5870 /* 5871 * Limit concurrency of abort commands to prevent 5872 * over-subscription of commands 5873 */ 5874 static inline int wait_for_available_abort_cmd(struct ctlr_info *h) 5875 { 5876 #define ABORT_CMD_WAIT_MSECS 5000 5877 return !wait_event_timeout(h->abort_cmd_wait_queue, 5878 atomic_dec_if_positive(&h->abort_cmds_available) >= 0, 5879 msecs_to_jiffies(ABORT_CMD_WAIT_MSECS)); 5880 } 5881 5882 /* Send an abort for the specified command. 5883 * If the device and controller support it, 5884 * send a task abort request. 5885 */ 5886 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) 5887 { 5888 5889 int rc; 5890 struct ctlr_info *h; 5891 struct hpsa_scsi_dev_t *dev; 5892 struct CommandList *abort; /* pointer to command to be aborted */ 5893 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */ 5894 char msg[256]; /* For debug messaging. */ 5895 int ml = 0; 5896 __le32 tagupper, taglower; 5897 int refcount, reply_queue; 5898 5899 if (sc == NULL) 5900 return FAILED; 5901 5902 if (sc->device == NULL) 5903 return FAILED; 5904 5905 /* Find the controller of the command to be aborted */ 5906 h = sdev_to_hba(sc->device); 5907 if (h == NULL) 5908 return FAILED; 5909 5910 /* Find the device of the command to be aborted */ 5911 dev = sc->device->hostdata; 5912 if (!dev) { 5913 dev_err(&h->pdev->dev, "%s: device lookup failed\n", 5914 __func__); 5915 return FAILED; 5916 } 5917 5918 /* If controller locked up, we can guarantee command won't complete */ 5919 if (lockup_detected(h)) { 5920 hpsa_show_dev_msg(KERN_WARNING, h, dev, 5921 "ABORT FAILED, lockup detected"); 5922 return FAILED; 5923 } 5924 5925 /* This is a good time to check if controller lockup has occurred */ 5926 if (detect_controller_lockup(h)) { 5927 hpsa_show_dev_msg(KERN_WARNING, h, dev, 5928 "ABORT FAILED, new lockup detected"); 5929 return FAILED; 5930 } 5931 5932 /* Check that controller supports some kind of task abort */ 5933 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) && 5934 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) 5935 return FAILED; 5936 5937 memset(msg, 0, sizeof(msg)); 5938 ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p", 5939 h->scsi_host->host_no, sc->device->channel, 5940 sc->device->id, sc->device->lun, 5941 "Aborting command", sc); 5942 5943 /* Get SCSI command to be aborted */ 5944 abort = (struct CommandList *) sc->host_scribble; 5945 if (abort == NULL) { 5946 /* This can happen if the command already completed. */ 5947 return SUCCESS; 5948 } 5949 refcount = atomic_inc_return(&abort->refcount); 5950 if (refcount == 1) { /* Command is done already. */ 5951 cmd_free(h, abort); 5952 return SUCCESS; 5953 } 5954 5955 /* Don't bother trying the abort if we know it won't work.
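(i.e. a plain CISS-path command to a device that does not claim abort support)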
*/ 5956 if (abort->cmd_type != CMD_IOACCEL2 && 5957 abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) { 5958 cmd_free(h, abort); 5959 return FAILED; 5960 } 5961 5962 /* 5963 * Check that we're aborting the right command. 5964 * It's possible the CommandList already completed and got re-used. 5965 */ 5966 if (abort->scsi_cmd != sc) { 5967 cmd_free(h, abort); 5968 return SUCCESS; 5969 } 5970 5971 abort->abort_pending = true; 5972 hpsa_get_tag(h, abort, &taglower, &tagupper); 5973 reply_queue = hpsa_extract_reply_queue(h, abort); 5974 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower); 5975 as = abort->scsi_cmd; 5976 if (as != NULL) 5977 ml += sprintf(msg+ml, 5978 "CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ", 5979 as->cmd_len, as->cmnd[0], as->cmnd[1], 5980 as->serial_number); 5981 dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg); 5982 hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command"); 5983 5984 /* 5985 * Command is in flight, or possibly already completed 5986 * by the firmware (but not to the scsi mid layer) but we can't 5987 * distinguish which. Send the abort down. 5988 */ 5989 if (wait_for_available_abort_cmd(h)) { 5990 dev_warn(&h->pdev->dev, 5991 "%s FAILED, timeout waiting for an abort command to become available.\n", 5992 msg); 5993 cmd_free(h, abort); 5994 return FAILED; 5995 } 5996 rc = hpsa_send_abort_both_ways(h, dev, abort, reply_queue); 5997 atomic_inc(&h->abort_cmds_available); 5998 wake_up_all(&h->abort_cmd_wait_queue); 5999 if (rc != 0) { 6000 dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg); 6001 hpsa_show_dev_msg(KERN_WARNING, h, dev, 6002 "FAILED to abort command"); 6003 cmd_free(h, abort); 6004 return FAILED; 6005 } 6006 dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg); 6007 wait_event(h->event_sync_wait_queue, 6008 abort->scsi_cmd != sc || lockup_detected(h)); 6009 cmd_free(h, abort); 6010 return !lockup_detected(h) ? SUCCESS : FAILED; 6011 } 6012 6013 /* 6014 * For operations with an associated SCSI command, a command block is allocated 6015 * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the 6016 * block request tag as an index into a table of entries. cmd_tagged_free() is 6017 * the complement, although cmd_free() may be called instead. 6018 */ 6019 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, 6020 struct scsi_cmnd *scmd) 6021 { 6022 int idx = hpsa_get_cmd_index(scmd); 6023 struct CommandList *c = h->cmd_pool + idx; 6024 6025 if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) { 6026 dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n", 6027 idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1); 6028 /* The index value comes from the block layer, so if it's out of 6029 * bounds, it's probably not our bug. 6030 */ 6031 BUG(); 6032 } 6033 6034 atomic_inc(&c->refcount); 6035 if (unlikely(!hpsa_is_cmd_idle(c))) { 6036 /* 6037 * We expect that the SCSI layer will hand us a unique tag 6038 * value. Thus, there should never be a collision here between 6039 * two requests...because if the selected command isn't idle 6040 * then someone is going to be very disappointed. 6041 */ 6042 dev_err(&h->pdev->dev, 6043 "tag collision (tag=%d) in cmd_tagged_alloc().\n", 6044 idx); 6045 if (c->scsi_cmd != NULL) 6046 scsi_print_command(c->scsi_cmd); 6047 scsi_print_command(scmd); 6048 } 6049 6050 hpsa_cmd_partial_init(h, idx, c); 6051 return c; 6052 } 6053 6054 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c) 6055 { 6056 /* 6057 * Release our reference to the block. 
We don't need to do anything 6058 * else to free it, because it is accessed by index. (There's no point 6059 * in checking the result of the decrement, since we cannot guarantee 6060 * that there isn't a concurrent abort which is also accessing it.) 6061 */ 6062 (void)atomic_dec(&c->refcount); 6063 } 6064 6065 /* 6066 * For operations that cannot sleep, a command block is allocated at init, 6067 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track 6068 * which ones are free or in use. Lock must be held when calling this. 6069 * cmd_free() is the complement. 6070 * This function never gives up and never returns NULL. If it hangs, 6071 * another thread must call cmd_free() to free some tags. 6072 */ 6073 6074 static struct CommandList *cmd_alloc(struct ctlr_info *h) 6075 { 6076 struct CommandList *c; 6077 int refcount, i; 6078 int offset = 0; 6079 6080 /* 6081 * There is some *extremely* small but non-zero chance that 6082 * multiple threads could get in here, and one thread could 6083 * be scanning through the list of bits looking for a free 6084 * one, but the free ones are always behind him, and other 6085 * threads sneak in behind him and eat them before he can 6086 * get to them, so that while there is always a free one, a 6087 * very unlucky thread might be starved anyway, never able to 6088 * beat the other threads. In reality, this happens so 6089 * infrequently as to be indistinguishable from never. 6090 * 6091 * Note that we start allocating commands before the SCSI host structure 6092 * is initialized. Since the search starts at bit zero, this 6093 * all works, since we have at least one command structure available; 6094 * however, it means that the structures with the low indexes have to be 6095 * reserved for driver-initiated requests, while requests from the block 6096 * layer will use the higher indexes. 6097 */ 6098 6099 for (;;) { 6100 i = find_next_zero_bit(h->cmd_pool_bits, 6101 HPSA_NRESERVED_CMDS, 6102 offset); 6103 if (unlikely(i >= HPSA_NRESERVED_CMDS)) { 6104 offset = 0; 6105 continue; 6106 } 6107 c = h->cmd_pool + i; 6108 refcount = atomic_inc_return(&c->refcount); 6109 if (unlikely(refcount > 1)) { 6110 cmd_free(h, c); /* already in use */ 6111 offset = (i + 1) % HPSA_NRESERVED_CMDS; 6112 continue; 6113 } 6114 set_bit(i & (BITS_PER_LONG - 1), 6115 h->cmd_pool_bits + (i / BITS_PER_LONG)); 6116 break; /* it's ours now. */ 6117 } 6118 hpsa_cmd_partial_init(h, i, c); 6119 return c; 6120 } 6121 6122 /* 6123 * This is the complementary operation to cmd_alloc(). Note, however, that in 6124 * some corner cases it may also be used to free blocks allocated by 6125 * cmd_tagged_alloc() in which case the ref-count decrement does the trick and 6126 * the clear-bit is harmless.
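 * (A block-layer tag's bit in cmd_pool_bits was never set by
 * cmd_tagged_alloc(), so clearing it again here is a no-op.)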
6127 */ 6128 static void cmd_free(struct ctlr_info *h, struct CommandList *c) 6129 { 6130 if (atomic_dec_and_test(&c->refcount)) { 6131 int i; 6132 6133 i = c - h->cmd_pool; 6134 clear_bit(i & (BITS_PER_LONG - 1), 6135 h->cmd_pool_bits + (i / BITS_PER_LONG)); 6136 } 6137 } 6138 6139 #ifdef CONFIG_COMPAT 6140 6141 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, 6142 void __user *arg) 6143 { 6144 IOCTL32_Command_struct __user *arg32 = 6145 (IOCTL32_Command_struct __user *) arg; 6146 IOCTL_Command_struct arg64; 6147 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); 6148 int err; 6149 u32 cp; 6150 6151 memset(&arg64, 0, sizeof(arg64)); 6152 err = 0; 6153 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 6154 sizeof(arg64.LUN_info)); 6155 err |= copy_from_user(&arg64.Request, &arg32->Request, 6156 sizeof(arg64.Request)); 6157 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 6158 sizeof(arg64.error_info)); 6159 err |= get_user(arg64.buf_size, &arg32->buf_size); 6160 err |= get_user(cp, &arg32->buf); 6161 arg64.buf = compat_ptr(cp); 6162 err |= copy_to_user(p, &arg64, sizeof(arg64)); 6163 6164 if (err) 6165 return -EFAULT; 6166 6167 err = hpsa_ioctl(dev, CCISS_PASSTHRU, p); 6168 if (err) 6169 return err; 6170 err |= copy_in_user(&arg32->error_info, &p->error_info, 6171 sizeof(arg32->error_info)); 6172 if (err) 6173 return -EFAULT; 6174 return err; 6175 } 6176 6177 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, 6178 int cmd, void __user *arg) 6179 { 6180 BIG_IOCTL32_Command_struct __user *arg32 = 6181 (BIG_IOCTL32_Command_struct __user *) arg; 6182 BIG_IOCTL_Command_struct arg64; 6183 BIG_IOCTL_Command_struct __user *p = 6184 compat_alloc_user_space(sizeof(arg64)); 6185 int err; 6186 u32 cp; 6187 6188 memset(&arg64, 0, sizeof(arg64)); 6189 err = 0; 6190 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 6191 sizeof(arg64.LUN_info)); 6192 err |= copy_from_user(&arg64.Request, &arg32->Request, 6193 sizeof(arg64.Request)); 6194 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 6195 sizeof(arg64.error_info)); 6196 err |= get_user(arg64.buf_size, &arg32->buf_size); 6197 err |= get_user(arg64.malloc_size, &arg32->malloc_size); 6198 err |= get_user(cp, &arg32->buf); 6199 arg64.buf = compat_ptr(cp); 6200 err |= copy_to_user(p, &arg64, sizeof(arg64)); 6201 6202 if (err) 6203 return -EFAULT; 6204 6205 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p); 6206 if (err) 6207 return err; 6208 err |= copy_in_user(&arg32->error_info, &p->error_info, 6209 sizeof(arg32->error_info)); 6210 if (err) 6211 return -EFAULT; 6212 return err; 6213 } 6214 6215 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg) 6216 { 6217 switch (cmd) { 6218 case CCISS_GETPCIINFO: 6219 case CCISS_GETINTINFO: 6220 case CCISS_SETINTINFO: 6221 case CCISS_GETNODENAME: 6222 case CCISS_SETNODENAME: 6223 case CCISS_GETHEARTBEAT: 6224 case CCISS_GETBUSTYPES: 6225 case CCISS_GETFIRMVER: 6226 case CCISS_GETDRIVVER: 6227 case CCISS_REVALIDVOLS: 6228 case CCISS_DEREGDISK: 6229 case CCISS_REGNEWDISK: 6230 case CCISS_REGNEWD: 6231 case CCISS_RESCANDISK: 6232 case CCISS_GETLUNINFO: 6233 return hpsa_ioctl(dev, cmd, arg); 6234 6235 case CCISS_PASSTHRU32: 6236 return hpsa_ioctl32_passthru(dev, cmd, arg); 6237 case CCISS_BIG_PASSTHRU32: 6238 return hpsa_ioctl32_big_passthru(dev, cmd, arg); 6239 6240 default: 6241 return -ENOIOCTLCMD; 6242 } 6243 } 6244 #endif 6245 6246 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) 6247 
{ 6248 struct hpsa_pci_info pciinfo; 6249 6250 if (!argp) 6251 return -EINVAL; 6252 pciinfo.domain = pci_domain_nr(h->pdev->bus); 6253 pciinfo.bus = h->pdev->bus->number; 6254 pciinfo.dev_fn = h->pdev->devfn; 6255 pciinfo.board_id = h->board_id; 6256 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo))) 6257 return -EFAULT; 6258 return 0; 6259 } 6260 6261 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) 6262 { 6263 DriverVer_type DriverVer; 6264 unsigned char vmaj, vmin, vsubmin; 6265 int rc; 6266 6267 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu", 6268 &vmaj, &vmin, &vsubmin); 6269 if (rc != 3) { 6270 dev_info(&h->pdev->dev, "driver version string '%s' " 6271 "unrecognized.", HPSA_DRIVER_VERSION); 6272 vmaj = 0; 6273 vmin = 0; 6274 vsubmin = 0; 6275 } 6276 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin; 6277 if (!argp) 6278 return -EINVAL; 6279 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type))) 6280 return -EFAULT; 6281 return 0; 6282 } 6283 6284 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) 6285 { 6286 IOCTL_Command_struct iocommand; 6287 struct CommandList *c; 6288 char *buff = NULL; 6289 u64 temp64; 6290 int rc = 0; 6291 6292 if (!argp) 6293 return -EINVAL; 6294 if (!capable(CAP_SYS_RAWIO)) 6295 return -EPERM; 6296 if (copy_from_user(&iocommand, argp, sizeof(iocommand))) 6297 return -EFAULT; 6298 if ((iocommand.buf_size < 1) && 6299 (iocommand.Request.Type.Direction != XFER_NONE)) { 6300 return -EINVAL; 6301 } 6302 if (iocommand.buf_size > 0) { 6303 buff = kmalloc(iocommand.buf_size, GFP_KERNEL); 6304 if (buff == NULL) 6305 return -ENOMEM; 6306 if (iocommand.Request.Type.Direction & XFER_WRITE) { 6307 /* Copy the data into the buffer we created */ 6308 if (copy_from_user(buff, iocommand.buf, 6309 iocommand.buf_size)) { 6310 rc = -EFAULT; 6311 goto out_kfree; 6312 } 6313 } else { 6314 memset(buff, 0, iocommand.buf_size); 6315 } 6316 } 6317 c = cmd_alloc(h); 6318 6319 /* Fill in the command type */ 6320 c->cmd_type = CMD_IOCTL_PEND; 6321 c->scsi_cmd = SCSI_CMD_BUSY; 6322 /* Fill in Command Header */ 6323 c->Header.ReplyQueue = 0; /* unused in simple mode */ 6324 if (iocommand.buf_size > 0) { /* buffer to fill */ 6325 c->Header.SGList = 1; 6326 c->Header.SGTotal = cpu_to_le16(1); 6327 } else { /* no buffers to fill */ 6328 c->Header.SGList = 0; 6329 c->Header.SGTotal = cpu_to_le16(0); 6330 } 6331 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); 6332 6333 /* Fill in Request block */ 6334 memcpy(&c->Request, &iocommand.Request, 6335 sizeof(c->Request)); 6336 6337 /* Fill in the scatter gather information */ 6338 if (iocommand.buf_size > 0) { 6339 temp64 = pci_map_single(h->pdev, buff, 6340 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); 6341 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) { 6342 c->SG[0].Addr = cpu_to_le64(0); 6343 c->SG[0].Len = cpu_to_le32(0); 6344 rc = -ENOMEM; 6345 goto out; 6346 } 6347 c->SG[0].Addr = cpu_to_le64(temp64); 6348 c->SG[0].Len = cpu_to_le32(iocommand.buf_size); 6349 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ 6350 } 6351 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); 6352 if (iocommand.buf_size > 0) 6353 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); 6354 check_ioctl_unit_attention(h, c); 6355 if (rc) { 6356 rc = -EIO; 6357 goto out; 6358 } 6359 6360 /* Copy the error information out */ 6361 memcpy(&iocommand.error_info, c->err_info, 6362 sizeof(iocommand.error_info)); 6363 if (copy_to_user(argp, &iocommand, 
sizeof(iocommand))) { 6364 rc = -EFAULT; 6365 goto out; 6366 } 6367 if ((iocommand.Request.Type.Direction & XFER_READ) && 6368 iocommand.buf_size > 0) { 6369 /* Copy the data out of the buffer we created */ 6370 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { 6371 rc = -EFAULT; 6372 goto out; 6373 } 6374 } 6375 out: 6376 cmd_free(h, c); 6377 out_kfree: 6378 kfree(buff); 6379 return rc; 6380 } 6381 6382 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) 6383 { 6384 BIG_IOCTL_Command_struct *ioc; 6385 struct CommandList *c; 6386 unsigned char **buff = NULL; 6387 int *buff_size = NULL; 6388 u64 temp64; 6389 BYTE sg_used = 0; 6390 int status = 0; 6391 u32 left; 6392 u32 sz; 6393 BYTE __user *data_ptr; 6394 6395 if (!argp) 6396 return -EINVAL; 6397 if (!capable(CAP_SYS_RAWIO)) 6398 return -EPERM; 6399 ioc = (BIG_IOCTL_Command_struct *) 6400 kmalloc(sizeof(*ioc), GFP_KERNEL); 6401 if (!ioc) { 6402 status = -ENOMEM; 6403 goto cleanup1; 6404 } 6405 if (copy_from_user(ioc, argp, sizeof(*ioc))) { 6406 status = -EFAULT; 6407 goto cleanup1; 6408 } 6409 if ((ioc->buf_size < 1) && 6410 (ioc->Request.Type.Direction != XFER_NONE)) { 6411 status = -EINVAL; 6412 goto cleanup1; 6413 } 6414 /* Check kmalloc limits using all SGs */ 6415 if (ioc->malloc_size > MAX_KMALLOC_SIZE) { 6416 status = -EINVAL; 6417 goto cleanup1; 6418 } 6419 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) { 6420 status = -EINVAL; 6421 goto cleanup1; 6422 } 6423 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL); 6424 if (!buff) { 6425 status = -ENOMEM; 6426 goto cleanup1; 6427 } 6428 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL); 6429 if (!buff_size) { 6430 status = -ENOMEM; 6431 goto cleanup1; 6432 } 6433 left = ioc->buf_size; 6434 data_ptr = ioc->buf; 6435 while (left) { 6436 sz = (left > ioc->malloc_size) ? 
ioc->malloc_size : left; 6437 buff_size[sg_used] = sz; 6438 buff[sg_used] = kmalloc(sz, GFP_KERNEL); 6439 if (buff[sg_used] == NULL) { 6440 status = -ENOMEM; 6441 goto cleanup1; 6442 } 6443 if (ioc->Request.Type.Direction & XFER_WRITE) { 6444 if (copy_from_user(buff[sg_used], data_ptr, sz)) { 6445 status = -EFAULT; 6446 goto cleanup1; 6447 } 6448 } else 6449 memset(buff[sg_used], 0, sz); 6450 left -= sz; 6451 data_ptr += sz; 6452 sg_used++; 6453 } 6454 c = cmd_alloc(h); 6455 6456 c->cmd_type = CMD_IOCTL_PEND; 6457 c->scsi_cmd = SCSI_CMD_BUSY; 6458 c->Header.ReplyQueue = 0; 6459 c->Header.SGList = (u8) sg_used; 6460 c->Header.SGTotal = cpu_to_le16(sg_used); 6461 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); 6462 memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); 6463 if (ioc->buf_size > 0) { 6464 int i; 6465 for (i = 0; i < sg_used; i++) { 6466 temp64 = pci_map_single(h->pdev, buff[i], 6467 buff_size[i], PCI_DMA_BIDIRECTIONAL); 6468 if (dma_mapping_error(&h->pdev->dev, 6469 (dma_addr_t) temp64)) { 6470 c->SG[i].Addr = cpu_to_le64(0); 6471 c->SG[i].Len = cpu_to_le32(0); 6472 hpsa_pci_unmap(h->pdev, c, i, 6473 PCI_DMA_BIDIRECTIONAL); 6474 status = -ENOMEM; 6475 goto cleanup0; 6476 } 6477 c->SG[i].Addr = cpu_to_le64(temp64); 6478 c->SG[i].Len = cpu_to_le32(buff_size[i]); 6479 c->SG[i].Ext = cpu_to_le32(0); 6480 } 6481 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST); 6482 } 6483 status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); 6484 if (sg_used) 6485 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); 6486 check_ioctl_unit_attention(h, c); 6487 if (status) { 6488 status = -EIO; 6489 goto cleanup0; 6490 } 6491 6492 /* Copy the error information out */ 6493 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); 6494 if (copy_to_user(argp, ioc, sizeof(*ioc))) { 6495 status = -EFAULT; 6496 goto cleanup0; 6497 } 6498 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) { 6499 int i; 6500 6501 /* Copy the data out of the buffer we created */ 6502 BYTE __user *ptr = ioc->buf; 6503 for (i = 0; i < sg_used; i++) { 6504 if (copy_to_user(ptr, buff[i], buff_size[i])) { 6505 status = -EFAULT; 6506 goto cleanup0; 6507 } 6508 ptr += buff_size[i]; 6509 } 6510 } 6511 status = 0; 6512 cleanup0: 6513 cmd_free(h, c); 6514 cleanup1: 6515 if (buff) { 6516 int i; 6517 6518 for (i = 0; i < sg_used; i++) 6519 kfree(buff[i]); 6520 kfree(buff); 6521 } 6522 kfree(buff_size); 6523 kfree(ioc); 6524 return status; 6525 } 6526 6527 static void check_ioctl_unit_attention(struct ctlr_info *h, 6528 struct CommandList *c) 6529 { 6530 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 6531 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) 6532 (void) check_for_unit_attention(h, c); 6533 } 6534 6535 /* 6536 * ioctl 6537 */ 6538 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg) 6539 { 6540 struct ctlr_info *h; 6541 void __user *argp = (void __user *)arg; 6542 int rc; 6543 6544 h = sdev_to_hba(dev); 6545 6546 switch (cmd) { 6547 case CCISS_DEREGDISK: 6548 case CCISS_REGNEWDISK: 6549 case CCISS_REGNEWD: 6550 hpsa_scan_start(h->scsi_host); 6551 return 0; 6552 case CCISS_GETPCIINFO: 6553 return hpsa_getpciinfo_ioctl(h, argp); 6554 case CCISS_GETDRIVVER: 6555 return hpsa_getdrivver_ioctl(h, argp); 6556 case CCISS_PASSTHRU: 6557 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) 6558 return -EAGAIN; 6559 rc = hpsa_passthru_ioctl(h, argp); 6560 atomic_inc(&h->passthru_cmds_avail); 6561 return rc; 6562 case CCISS_BIG_PASSTHRU: 6563 
if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) 6564 return -EAGAIN; 6565 rc = hpsa_big_passthru_ioctl(h, argp); 6566 atomic_inc(&h->passthru_cmds_avail); 6567 return rc; 6568 default: 6569 return -ENOTTY; 6570 } 6571 } 6572 6573 static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, 6574 u8 reset_type) 6575 { 6576 struct CommandList *c; 6577 6578 c = cmd_alloc(h); 6579 6580 /* fill_cmd can't fail here, no data buffer to map */ 6581 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, 6582 RAID_CTLR_LUNID, TYPE_MSG); 6583 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */ 6584 c->waiting = NULL; 6585 enqueue_cmd_and_start_io(h, c); 6586 /* Don't wait for completion, the reset won't complete. Don't free 6587 * the command either. This is the last command we will send before 6588 * re-initializing everything, so it doesn't matter and won't leak. 6589 */ 6590 return; 6591 } 6592 6593 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, 6594 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, 6595 int cmd_type) 6596 { 6597 int pci_dir = XFER_NONE; 6598 u64 tag; /* for commands to be aborted */ 6599 6600 c->cmd_type = CMD_IOCTL_PEND; 6601 c->scsi_cmd = SCSI_CMD_BUSY; 6602 c->Header.ReplyQueue = 0; 6603 if (buff != NULL && size > 0) { 6604 c->Header.SGList = 1; 6605 c->Header.SGTotal = cpu_to_le16(1); 6606 } else { 6607 c->Header.SGList = 0; 6608 c->Header.SGTotal = cpu_to_le16(0); 6609 } 6610 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); 6611 6612 if (cmd_type == TYPE_CMD) { 6613 switch (cmd) { 6614 case HPSA_INQUIRY: 6615 /* are we trying to read a vital product page */ 6616 if (page_code & VPD_PAGE) { 6617 c->Request.CDB[1] = 0x01; 6618 c->Request.CDB[2] = (page_code & 0xff); 6619 } 6620 c->Request.CDBLen = 6; 6621 c->Request.type_attr_dir = 6622 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 6623 c->Request.Timeout = 0; 6624 c->Request.CDB[0] = HPSA_INQUIRY; 6625 c->Request.CDB[4] = size & 0xFF; 6626 break; 6627 case HPSA_REPORT_LOG: 6628 case HPSA_REPORT_PHYS: 6629 /* Talking to controller so It's a physical command 6630 mode = 00 target = 0. Nothing to write. 
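The allocation length is encoded big-endian in CDB[6..9] below, MSB first.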
6631 */ 6632 c->Request.CDBLen = 12; 6633 c->Request.type_attr_dir = 6634 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 6635 c->Request.Timeout = 0; 6636 c->Request.CDB[0] = cmd; 6637 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ 6638 c->Request.CDB[7] = (size >> 16) & 0xFF; 6639 c->Request.CDB[8] = (size >> 8) & 0xFF; 6640 c->Request.CDB[9] = size & 0xFF; 6641 break; 6642 case BMIC_SENSE_DIAG_OPTIONS: 6643 c->Request.CDBLen = 16; 6644 c->Request.type_attr_dir = 6645 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 6646 c->Request.Timeout = 0; 6647 /* Spec says this should be BMIC_WRITE */ 6648 c->Request.CDB[0] = BMIC_READ; 6649 c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS; 6650 break; 6651 case BMIC_SET_DIAG_OPTIONS: 6652 c->Request.CDBLen = 16; 6653 c->Request.type_attr_dir = 6654 TYPE_ATTR_DIR(cmd_type, 6655 ATTR_SIMPLE, XFER_WRITE); 6656 c->Request.Timeout = 0; 6657 c->Request.CDB[0] = BMIC_WRITE; 6658 c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS; 6659 break; 6660 case HPSA_CACHE_FLUSH: 6661 c->Request.CDBLen = 12; 6662 c->Request.type_attr_dir = 6663 TYPE_ATTR_DIR(cmd_type, 6664 ATTR_SIMPLE, XFER_WRITE); 6665 c->Request.Timeout = 0; 6666 c->Request.CDB[0] = BMIC_WRITE; 6667 c->Request.CDB[6] = BMIC_CACHE_FLUSH; 6668 c->Request.CDB[7] = (size >> 8) & 0xFF; 6669 c->Request.CDB[8] = size & 0xFF; 6670 break; 6671 case TEST_UNIT_READY: 6672 c->Request.CDBLen = 6; 6673 c->Request.type_attr_dir = 6674 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); 6675 c->Request.Timeout = 0; 6676 break; 6677 case HPSA_GET_RAID_MAP: 6678 c->Request.CDBLen = 12; 6679 c->Request.type_attr_dir = 6680 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 6681 c->Request.Timeout = 0; 6682 c->Request.CDB[0] = HPSA_CISS_READ; 6683 c->Request.CDB[1] = cmd; 6684 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ 6685 c->Request.CDB[7] = (size >> 16) & 0xFF; 6686 c->Request.CDB[8] = (size >> 8) & 0xFF; 6687 c->Request.CDB[9] = size & 0xFF; 6688 break; 6689 case BMIC_SENSE_CONTROLLER_PARAMETERS: 6690 c->Request.CDBLen = 10; 6691 c->Request.type_attr_dir = 6692 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 6693 c->Request.Timeout = 0; 6694 c->Request.CDB[0] = BMIC_READ; 6695 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS; 6696 c->Request.CDB[7] = (size >> 16) & 0xFF; 6697 c->Request.CDB[8] = (size >> 8) & 0xFF; 6698 break; 6699 case BMIC_IDENTIFY_PHYSICAL_DEVICE: 6700 c->Request.CDBLen = 10; 6701 c->Request.type_attr_dir = 6702 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 6703 c->Request.Timeout = 0; 6704 c->Request.CDB[0] = BMIC_READ; 6705 c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE; 6706 c->Request.CDB[7] = (size >> 16) & 0xFF; 6707 c->Request.CDB[8] = (size >> 8) & 0XFF; 6708 break; 6709 case BMIC_SENSE_SUBSYSTEM_INFORMATION: 6710 c->Request.CDBLen = 10; 6711 c->Request.type_attr_dir = 6712 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 6713 c->Request.Timeout = 0; 6714 c->Request.CDB[0] = BMIC_READ; 6715 c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION; 6716 c->Request.CDB[7] = (size >> 16) & 0xFF; 6717 c->Request.CDB[8] = (size >> 8) & 0XFF; 6718 break; 6719 case BMIC_SENSE_STORAGE_BOX_PARAMS: 6720 c->Request.CDBLen = 10; 6721 c->Request.type_attr_dir = 6722 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 6723 c->Request.Timeout = 0; 6724 c->Request.CDB[0] = BMIC_READ; 6725 c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS; 6726 c->Request.CDB[7] = (size >> 16) & 0xFF; 6727 c->Request.CDB[8] = (size >> 8) & 0XFF; 6728 break; 6729 case BMIC_IDENTIFY_CONTROLLER: 6730 c->Request.CDBLen = 
10; 6731 c->Request.type_attr_dir = 6732 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 6733 c->Request.Timeout = 0; 6734 c->Request.CDB[0] = BMIC_READ; 6735 c->Request.CDB[1] = 0; 6736 c->Request.CDB[2] = 0; 6737 c->Request.CDB[3] = 0; 6738 c->Request.CDB[4] = 0; 6739 c->Request.CDB[5] = 0; 6740 c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER; 6741 c->Request.CDB[7] = (size >> 16) & 0xFF; 6742 c->Request.CDB[8] = (size >> 8) & 0XFF; 6743 c->Request.CDB[9] = 0; 6744 break; 6745 default: 6746 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd); 6747 BUG(); 6748 return -1; 6749 } 6750 } else if (cmd_type == TYPE_MSG) { 6751 switch (cmd) { 6752 6753 case HPSA_PHYS_TARGET_RESET: 6754 c->Request.CDBLen = 16; 6755 c->Request.type_attr_dir = 6756 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); 6757 c->Request.Timeout = 0; /* Don't time out */ 6758 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); 6759 c->Request.CDB[0] = HPSA_RESET; 6760 c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE; 6761 /* Physical target reset needs no control bytes 4-7*/ 6762 c->Request.CDB[4] = 0x00; 6763 c->Request.CDB[5] = 0x00; 6764 c->Request.CDB[6] = 0x00; 6765 c->Request.CDB[7] = 0x00; 6766 break; 6767 case HPSA_DEVICE_RESET_MSG: 6768 c->Request.CDBLen = 16; 6769 c->Request.type_attr_dir = 6770 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); 6771 c->Request.Timeout = 0; /* Don't time out */ 6772 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); 6773 c->Request.CDB[0] = cmd; 6774 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN; 6775 /* If bytes 4-7 are zero, it means reset the */ 6776 /* LunID device */ 6777 c->Request.CDB[4] = 0x00; 6778 c->Request.CDB[5] = 0x00; 6779 c->Request.CDB[6] = 0x00; 6780 c->Request.CDB[7] = 0x00; 6781 break; 6782 case HPSA_ABORT_MSG: 6783 memcpy(&tag, buff, sizeof(tag)); 6784 dev_dbg(&h->pdev->dev, 6785 "Abort Tag:0x%016llx using rqst Tag:0x%016llx", 6786 tag, c->Header.tag); 6787 c->Request.CDBLen = 16; 6788 c->Request.type_attr_dir = 6789 TYPE_ATTR_DIR(cmd_type, 6790 ATTR_SIMPLE, XFER_WRITE); 6791 c->Request.Timeout = 0; /* Don't time out */ 6792 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT; 6793 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK; 6794 c->Request.CDB[2] = 0x00; /* reserved */ 6795 c->Request.CDB[3] = 0x00; /* reserved */ 6796 /* Tag to abort goes in CDB[4]-CDB[11] */ 6797 memcpy(&c->Request.CDB[4], &tag, sizeof(tag)); 6798 c->Request.CDB[12] = 0x00; /* reserved */ 6799 c->Request.CDB[13] = 0x00; /* reserved */ 6800 c->Request.CDB[14] = 0x00; /* reserved */ 6801 c->Request.CDB[15] = 0x00; /* reserved */ 6802 break; 6803 default: 6804 dev_warn(&h->pdev->dev, "unknown message type %d\n", 6805 cmd); 6806 BUG(); 6807 } 6808 } else { 6809 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); 6810 BUG(); 6811 } 6812 6813 switch (GET_DIR(c->Request.type_attr_dir)) { 6814 case XFER_READ: 6815 pci_dir = PCI_DMA_FROMDEVICE; 6816 break; 6817 case XFER_WRITE: 6818 pci_dir = PCI_DMA_TODEVICE; 6819 break; 6820 case XFER_NONE: 6821 pci_dir = PCI_DMA_NONE; 6822 break; 6823 default: 6824 pci_dir = PCI_DMA_BIDIRECTIONAL; 6825 } 6826 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir)) 6827 return -1; 6828 return 0; 6829 } 6830 6831 /* 6832 * Map (physical) PCI mem into (virtual) kernel space 6833 */ 6834 static void __iomem *remap_pci_mem(ulong base, ulong size) 6835 { 6836 ulong page_base = ((ulong) base) & PAGE_MASK; 6837 ulong page_offs = ((ulong) base) - page_base; 6838 void __iomem *page_remapped = ioremap_nocache(page_base, 6839 page_offs + size); 6840 6841 return page_remapped ? (page_remapped + page_offs) : NULL; 6842 } 6843 6844 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q) 6845 { 6846 return h->access.command_completed(h, q); 6847 } 6848 6849 static inline bool interrupt_pending(struct ctlr_info *h) 6850 { 6851 return h->access.intr_pending(h); 6852 } 6853 6854 static inline long interrupt_not_for_us(struct ctlr_info *h) 6855 { 6856 return (h->access.intr_pending(h) == 0) || 6857 (h->interrupts_enabled == 0); 6858 } 6859 6860 static inline int bad_tag(struct ctlr_info *h, u32 tag_index, 6861 u32 raw_tag) 6862 { 6863 if (unlikely(tag_index >= h->nr_cmds)) { 6864 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); 6865 return 1; 6866 } 6867 return 0; 6868 } 6869 6870 static inline void finish_cmd(struct CommandList *c) 6871 { 6872 dial_up_lockup_detection_on_fw_flash_complete(c->h, c); 6873 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI 6874 || c->cmd_type == CMD_IOACCEL2)) 6875 complete_scsi_command(c); 6876 else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF) 6877 complete(c->waiting); 6878 } 6879 6880 /* process completion of an indexed ("direct lookup") command */ 6881 static inline void process_indexed_cmd(struct ctlr_info *h, 6882 u32 raw_tag) 6883 { 6884 u32 tag_index; 6885 struct CommandList *c; 6886 6887 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT; 6888 if (!bad_tag(h, tag_index, raw_tag)) { 6889 c = h->cmd_pool + tag_index; 6890 finish_cmd(c); 6891 } 6892 } 6893 6894 /* Some controllers, like p400, will give us one interrupt 6895 * after a soft reset, even if we turned interrupts off. 6896 * Only need to check for this in the hpsa_xxx_discard_completions 6897 * functions. 6898 */ 6899 static int ignore_bogus_interrupt(struct ctlr_info *h) 6900 { 6901 if (likely(!reset_devices)) 6902 return 0; 6903 6904 if (likely(h->interrupts_enabled)) 6905 return 0; 6906 6907 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " 6908 "(known firmware bug.) Ignoring.\n"); 6909 6910 return 1; 6911 } 6912 6913 /* 6914 * Convert &h->q[x] (passed to interrupt handlers) back to h. 6915 * Relies on (h->q[x] == x) being true for x such that 6916 * 0 <= x < MAX_REPLY_QUEUES.
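 * For example, given the pointer &h->q[3], *queue == 3, so
 * (queue - *queue) points back at h->q[0] and container_of() recovers h.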
6917 */ 6918 static struct ctlr_info *queue_to_hba(u8 *queue) 6919 { 6920 return container_of((queue - *queue), struct ctlr_info, q[0]); 6921 } 6922 6923 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue) 6924 { 6925 struct ctlr_info *h = queue_to_hba(queue); 6926 u8 q = *(u8 *) queue; 6927 u32 raw_tag; 6928 6929 if (ignore_bogus_interrupt(h)) 6930 return IRQ_NONE; 6931 6932 if (interrupt_not_for_us(h)) 6933 return IRQ_NONE; 6934 h->last_intr_timestamp = get_jiffies_64(); 6935 while (interrupt_pending(h)) { 6936 raw_tag = get_next_completion(h, q); 6937 while (raw_tag != FIFO_EMPTY) 6938 raw_tag = next_command(h, q); 6939 } 6940 return IRQ_HANDLED; 6941 } 6942 6943 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue) 6944 { 6945 struct ctlr_info *h = queue_to_hba(queue); 6946 u32 raw_tag; 6947 u8 q = *(u8 *) queue; 6948 6949 if (ignore_bogus_interrupt(h)) 6950 return IRQ_NONE; 6951 6952 h->last_intr_timestamp = get_jiffies_64(); 6953 raw_tag = get_next_completion(h, q); 6954 while (raw_tag != FIFO_EMPTY) 6955 raw_tag = next_command(h, q); 6956 return IRQ_HANDLED; 6957 } 6958 6959 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue) 6960 { 6961 struct ctlr_info *h = queue_to_hba((u8 *) queue); 6962 u32 raw_tag; 6963 u8 q = *(u8 *) queue; 6964 6965 if (interrupt_not_for_us(h)) 6966 return IRQ_NONE; 6967 h->last_intr_timestamp = get_jiffies_64(); 6968 while (interrupt_pending(h)) { 6969 raw_tag = get_next_completion(h, q); 6970 while (raw_tag != FIFO_EMPTY) { 6971 process_indexed_cmd(h, raw_tag); 6972 raw_tag = next_command(h, q); 6973 } 6974 } 6975 return IRQ_HANDLED; 6976 } 6977 6978 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue) 6979 { 6980 struct ctlr_info *h = queue_to_hba(queue); 6981 u32 raw_tag; 6982 u8 q = *(u8 *) queue; 6983 6984 h->last_intr_timestamp = get_jiffies_64(); 6985 raw_tag = get_next_completion(h, q); 6986 while (raw_tag != FIFO_EMPTY) { 6987 process_indexed_cmd(h, raw_tag); 6988 raw_tag = next_command(h, q); 6989 } 6990 return IRQ_HANDLED; 6991 } 6992 6993 /* Send a message CDB to the firmware. Careful, this only works 6994 * in simple mode, not performant mode due to the tag lookup. 6995 * We only ever use this immediately after a controller reset. 6996 */ 6997 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, 6998 unsigned char type) 6999 { 7000 struct Command { 7001 struct CommandListHeader CommandHeader; 7002 struct RequestBlock Request; 7003 struct ErrDescriptor ErrorDescriptor; 7004 }; 7005 struct Command *cmd; 7006 static const size_t cmd_sz = sizeof(*cmd) + 7007 sizeof(cmd->ErrorDescriptor); 7008 dma_addr_t paddr64; 7009 __le32 paddr32; 7010 u32 tag; 7011 void __iomem *vaddr; 7012 int i, err; 7013 7014 vaddr = pci_ioremap_bar(pdev, 0); 7015 if (vaddr == NULL) 7016 return -ENOMEM; 7017 7018 /* The Inbound Post Queue only accepts 32-bit physical addresses for the 7019 * CCISS commands, so they must be allocated from the lower 4GiB of 7020 * memory. 7021 */ 7022 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 7023 if (err) { 7024 iounmap(vaddr); 7025 return err; 7026 } 7027 7028 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); 7029 if (cmd == NULL) { 7030 iounmap(vaddr); 7031 return -ENOMEM; 7032 } 7033 7034 /* This must fit, because of the 32-bit consistent DMA mask. Also, 7035 * although there's no guarantee, we assume that the address is at 7036 * least 4-byte aligned (most likely, it's page-aligned). 
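 * The command's 32-bit physical address doubles as its tag: it is posted
 * to the inbound request port, and completion is detected by polling the
 * outbound reply port until that same value comes back.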
7037 */ 7038 paddr32 = cpu_to_le32(paddr64); 7039 7040 cmd->CommandHeader.ReplyQueue = 0; 7041 cmd->CommandHeader.SGList = 0; 7042 cmd->CommandHeader.SGTotal = cpu_to_le16(0); 7043 cmd->CommandHeader.tag = cpu_to_le64(paddr64); 7044 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); 7045 7046 cmd->Request.CDBLen = 16; 7047 cmd->Request.type_attr_dir = 7048 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE); 7049 cmd->Request.Timeout = 0; /* Don't time out */ 7050 cmd->Request.CDB[0] = opcode; 7051 cmd->Request.CDB[1] = type; 7052 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ 7053 cmd->ErrorDescriptor.Addr = 7054 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd))); 7055 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo)); 7056 7057 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET); 7058 7059 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { 7060 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); 7061 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64) 7062 break; 7063 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); 7064 } 7065 7066 iounmap(vaddr); 7067 7068 /* we leak the DMA buffer here ... no choice since the controller could 7069 * still complete the command. 7070 */ 7071 if (i == HPSA_MSG_SEND_RETRY_LIMIT) { 7072 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n", 7073 opcode, type); 7074 return -ETIMEDOUT; 7075 } 7076 7077 pci_free_consistent(pdev, cmd_sz, cmd, paddr64); 7078 7079 if (tag & HPSA_ERROR_BIT) { 7080 dev_err(&pdev->dev, "controller message %02x:%02x failed\n", 7081 opcode, type); 7082 return -EIO; 7083 } 7084 7085 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", 7086 opcode, type); 7087 return 0; 7088 } 7089 7090 #define hpsa_noop(p) hpsa_message(p, 3, 0) 7091 7092 static int hpsa_controller_hard_reset(struct pci_dev *pdev, 7093 void __iomem *vaddr, u32 use_doorbell) 7094 { 7095 7096 if (use_doorbell) { 7097 /* For everything after the P600, the PCI power state method 7098 * of resetting the controller doesn't work, so we have this 7099 * other way using the doorbell register. 7100 */ 7101 dev_info(&pdev->dev, "using doorbell to reset controller\n"); 7102 writel(use_doorbell, vaddr + SA5_DOORBELL); 7103 7104 /* PMC hardware guys tell us we need a 10 second delay after 7105 * doorbell reset and before any attempt to talk to the board 7106 * at all to ensure that this actually works and doesn't fall 7107 * over in some weird corner cases. 7108 */ 7109 msleep(10000); 7110 } else { /* Try to do it the PCI power state way */ 7111 7112 /* Quoting from the Open CISS Specification: "The Power 7113 * Management Control/Status Register (CSR) controls the power 7114 * state of the device. The normal operating state is D0, 7115 * CSR=00h. The software off state is D3, CSR=03h. To reset 7116 * the controller, place the interface device in D3 then to D0, 7117 * this causes a secondary PCI reset which will reset the 7118 * controller." */ 7119 7120 int rc = 0; 7121 7122 dev_info(&pdev->dev, "using PCI PM to reset controller\n"); 7123 7124 /* enter the D3hot power management state */ 7125 rc = pci_set_power_state(pdev, PCI_D3hot); 7126 if (rc) 7127 return rc; 7128 7129 msleep(500); 7130 7131 /* enter the D0 power management state */ 7132 rc = pci_set_power_state(pdev, PCI_D0); 7133 if (rc) 7134 return rc; 7135 7136 /* 7137 * The P600 requires a small delay when changing states. 7138 * Otherwise we may think the board did not reset and we bail. 7139 * This is for kdump only and is particular to the P600. 7140 */ 7141 msleep(500); 7142 } 7143 return 0; 7144 } 7145 7146 static void init_driver_version(char *driver_version, int len) 7147 { 7148 memset(driver_version, 0, len); 7149 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1); 7150 } 7151 7152 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable) 7153 { 7154 char *driver_version; 7155 int i, size = sizeof(cfgtable->driver_version); 7156 7157 driver_version = kmalloc(size, GFP_KERNEL); 7158 if (!driver_version) 7159 return -ENOMEM; 7160 7161 init_driver_version(driver_version, size); 7162 for (i = 0; i < size; i++) 7163 writeb(driver_version[i], &cfgtable->driver_version[i]); 7164 kfree(driver_version); 7165 return 0; 7166 } 7167 7168 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable, 7169 unsigned char *driver_ver) 7170 { 7171 int i; 7172 7173 for (i = 0; i < sizeof(cfgtable->driver_version); i++) 7174 driver_ver[i] = readb(&cfgtable->driver_version[i]); 7175 } 7176 7177 static int controller_reset_failed(struct CfgTable __iomem *cfgtable) 7178 { 7179 7180 char *driver_ver, *old_driver_ver; 7181 int rc, size = sizeof(cfgtable->driver_version); 7182 7183 old_driver_ver = kmalloc(2 * size, GFP_KERNEL); 7184 if (!old_driver_ver) 7185 return -ENOMEM; 7186 driver_ver = old_driver_ver + size; 7187 7188 /* After a reset, the 32 bytes of "driver version" in the cfgtable 7189 * should have been changed, otherwise we know the reset failed. 7190 */ 7191 init_driver_version(old_driver_ver, size); 7192 read_driver_ver_from_cfgtable(cfgtable, driver_ver); 7193 rc = !memcmp(driver_ver, old_driver_ver, size); 7194 kfree(old_driver_ver); 7195 return rc; 7196 } 7197 /* This does a hard reset of the controller using PCI power management 7198 * states or the doorbell register. 7199 */ 7200 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id) 7201 { 7202 u64 cfg_offset; 7203 u32 cfg_base_addr; 7204 u64 cfg_base_addr_index; 7205 void __iomem *vaddr; 7206 unsigned long paddr; 7207 u32 misc_fw_support; 7208 int rc; 7209 struct CfgTable __iomem *cfgtable; 7210 u32 use_doorbell; 7211 u16 command_register; 7212 7213 /* For controllers as old as the P600, this is very nearly 7214 * the same thing as 7215 * 7216 * pci_save_state(pci_dev); 7217 * pci_set_power_state(pci_dev, PCI_D3hot); 7218 * pci_set_power_state(pci_dev, PCI_D0); 7219 * pci_restore_state(pci_dev); 7220 * 7221 * For controllers newer than the P600, the pci power state 7222 * method of resetting doesn't work so we have another way 7223 * using the doorbell register. 7224 */ 7225 7226 if (!ctlr_is_resettable(board_id)) { 7227 dev_warn(&pdev->dev, "Controller not resettable\n"); 7228 return -ENODEV; 7229 } 7230 7231 /* if controller is soft- but not hard resettable... */ 7232 if (!ctlr_is_hard_resettable(board_id)) 7233 return -ENOTSUPP; /* try soft reset later.
*/ 7234 7235 /* Save the PCI command register */ 7236 pci_read_config_word(pdev, 4, &command_register); 7237 pci_save_state(pdev); 7238 7239 /* find the first memory BAR, so we can find the cfg table */ 7240 rc = hpsa_pci_find_memory_BAR(pdev, &paddr); 7241 if (rc) 7242 return rc; 7243 vaddr = remap_pci_mem(paddr, 0x250); 7244 if (!vaddr) 7245 return -ENOMEM; 7246 7247 /* find cfgtable in order to check if reset via doorbell is supported */ 7248 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, 7249 &cfg_base_addr_index, &cfg_offset); 7250 if (rc) 7251 goto unmap_vaddr; 7252 cfgtable = remap_pci_mem(pci_resource_start(pdev, 7253 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable)); 7254 if (!cfgtable) { 7255 rc = -ENOMEM; 7256 goto unmap_vaddr; 7257 } 7258 rc = write_driver_ver_to_cfgtable(cfgtable); 7259 if (rc) 7260 goto unmap_cfgtable; 7261 7262 /* If reset via doorbell register is supported, use that. 7263 * There are two such methods. Favor the newest method. 7264 */ 7265 misc_fw_support = readl(&cfgtable->misc_fw_support); 7266 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2; 7267 if (use_doorbell) { 7268 use_doorbell = DOORBELL_CTLR_RESET2; 7269 } else { 7270 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; 7271 if (use_doorbell) { 7272 dev_warn(&pdev->dev, 7273 "Soft reset not supported. Firmware update is required.\n"); 7274 rc = -ENOTSUPP; /* try soft reset */ 7275 goto unmap_cfgtable; 7276 } 7277 } 7278 7279 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); 7280 if (rc) 7281 goto unmap_cfgtable; 7282 7283 pci_restore_state(pdev); 7284 pci_write_config_word(pdev, 4, command_register); 7285 7286 /* Some devices (notably the HP Smart Array 5i Controller) 7287 need a little pause here */ 7288 msleep(HPSA_POST_RESET_PAUSE_MSECS); 7289 7290 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); 7291 if (rc) { 7292 dev_warn(&pdev->dev, 7293 "Failed waiting for board to become ready after hard reset\n"); 7294 goto unmap_cfgtable; 7295 } 7296 7297 rc = controller_reset_failed(vaddr); 7298 if (rc < 0) 7299 goto unmap_cfgtable; 7300 if (rc) { 7301 dev_warn(&pdev->dev, "Unable to successfully reset " 7302 "controller. Will try soft reset.\n"); 7303 rc = -ENOTSUPP; 7304 } else { 7305 dev_info(&pdev->dev, "board ready after hard reset.\n"); 7306 } 7307 7308 unmap_cfgtable: 7309 iounmap(cfgtable); 7310 7311 unmap_vaddr: 7312 iounmap(vaddr); 7313 return rc; 7314 } 7315 7316 /* 7317 * We cannot read the structure directly, for portability we must use 7318 * the io functions. 7319 * This is for debug only. 
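 * (The body below compiles to an empty function unless HPSA_DEBUG is
 * defined.)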
7320 */ 7321 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb) 7322 { 7323 #ifdef HPSA_DEBUG 7324 int i; 7325 char temp_name[17]; 7326 7327 dev_info(dev, "Controller Configuration information\n"); 7328 dev_info(dev, "------------------------------------\n"); 7329 for (i = 0; i < 4; i++) 7330 temp_name[i] = readb(&(tb->Signature[i])); 7331 temp_name[4] = '\0'; 7332 dev_info(dev, " Signature = %s\n", temp_name); 7333 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence))); 7334 dev_info(dev, " Transport methods supported = 0x%x\n", 7335 readl(&(tb->TransportSupport))); 7336 dev_info(dev, " Transport methods active = 0x%x\n", 7337 readl(&(tb->TransportActive))); 7338 dev_info(dev, " Requested transport Method = 0x%x\n", 7339 readl(&(tb->HostWrite.TransportRequest))); 7340 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n", 7341 readl(&(tb->HostWrite.CoalIntDelay))); 7342 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n", 7343 readl(&(tb->HostWrite.CoalIntCount))); 7344 dev_info(dev, " Max outstanding commands = %d\n", 7345 readl(&(tb->CmdsOutMax))); 7346 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); 7347 for (i = 0; i < 16; i++) 7348 temp_name[i] = readb(&(tb->ServerName[i])); 7349 temp_name[16] = '\0'; 7350 dev_info(dev, " Server Name = %s\n", temp_name); 7351 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n", 7352 readl(&(tb->HeartBeat))); 7353 #endif /* HPSA_DEBUG */ 7354 } 7355 7356 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) 7357 { 7358 int i, offset, mem_type, bar_type; 7359 7360 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */ 7361 return 0; 7362 offset = 0; 7363 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 7364 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE; 7365 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO) 7366 offset += 4; 7367 else { 7368 mem_type = pci_resource_flags(pdev, i) & 7369 PCI_BASE_ADDRESS_MEM_TYPE_MASK; 7370 switch (mem_type) { 7371 case PCI_BASE_ADDRESS_MEM_TYPE_32: 7372 case PCI_BASE_ADDRESS_MEM_TYPE_1M: 7373 offset += 4; /* 32 bit */ 7374 break; 7375 case PCI_BASE_ADDRESS_MEM_TYPE_64: 7376 offset += 8; 7377 break; 7378 default: /* reserved in PCI 2.2 */ 7379 dev_warn(&pdev->dev, 7380 "base address is invalid\n"); 7381 return -1; 7382 break; 7383 } 7384 } 7385 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) 7386 return i + 1; 7387 } 7388 return -1; 7389 } 7390 7391 static void hpsa_disable_interrupt_mode(struct ctlr_info *h) 7392 { 7393 if (h->msix_vector) { 7394 if (h->pdev->msix_enabled) 7395 pci_disable_msix(h->pdev); 7396 h->msix_vector = 0; 7397 } else if (h->msi_vector) { 7398 if (h->pdev->msi_enabled) 7399 pci_disable_msi(h->pdev); 7400 h->msi_vector = 0; 7401 } 7402 } 7403 7404 /* If MSI/MSI-X is supported by the kernel we will try to enable it on 7405 * controllers that are capable. If not, we use legacy INTx mode. 
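 *
 * For reference, the contract relied on below is the standard PCI core
 * one: pci_enable_msix_range(pdev, entries, minvec, maxvec) returns the
 * number of vectors actually allocated (anywhere in [minvec, maxvec]),
 * or a negative errno, in which case we fall back to MSI, then to INTx.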
7406 */ 7407 static void hpsa_interrupt_mode(struct ctlr_info *h) 7408 { 7409 #ifdef CONFIG_PCI_MSI 7410 int err, i; 7411 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES]; 7412 7413 for (i = 0; i < MAX_REPLY_QUEUES; i++) { 7414 hpsa_msix_entries[i].vector = 0; 7415 hpsa_msix_entries[i].entry = i; 7416 } 7417 7418 /* Some boards advertise MSI but don't really support it */ 7419 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || 7420 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) 7421 goto default_int_mode; 7422 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { 7423 dev_info(&h->pdev->dev, "MSI-X capable controller\n"); 7424 h->msix_vector = MAX_REPLY_QUEUES; 7425 if (h->msix_vector > num_online_cpus()) 7426 h->msix_vector = num_online_cpus(); 7427 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries, 7428 1, h->msix_vector); 7429 if (err < 0) { 7430 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err); 7431 h->msix_vector = 0; 7432 goto single_msi_mode; 7433 } else if (err < h->msix_vector) { 7434 dev_warn(&h->pdev->dev, "only %d MSI-X vectors " 7435 "available\n", err); 7436 } 7437 h->msix_vector = err; 7438 for (i = 0; i < h->msix_vector; i++) 7439 h->intr[i] = hpsa_msix_entries[i].vector; 7440 return; 7441 } 7442 single_msi_mode: 7443 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { 7444 dev_info(&h->pdev->dev, "MSI capable controller\n"); 7445 if (!pci_enable_msi(h->pdev)) 7446 h->msi_vector = 1; 7447 else 7448 dev_warn(&h->pdev->dev, "MSI init failed\n"); 7449 } 7450 default_int_mode: 7451 #endif /* CONFIG_PCI_MSI */ 7452 /* if we get here we're going to use the default interrupt mode */ 7453 h->intr[h->intr_mode] = h->pdev->irq; 7454 } 7455 7456 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) 7457 { 7458 int i; 7459 u32 subsystem_vendor_id, subsystem_device_id; 7460 7461 subsystem_vendor_id = pdev->subsystem_vendor; 7462 subsystem_device_id = pdev->subsystem_device; 7463 *board_id = ((subsystem_device_id << 16) & 0xffff0000) | 7464 subsystem_vendor_id; 7465 7466 for (i = 0; i < ARRAY_SIZE(products); i++) 7467 if (*board_id == products[i].board_id) 7468 return i; 7469 7470 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP && 7471 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) || 7472 !hpsa_allow_any) { 7473 dev_warn(&pdev->dev, "unrecognized board ID: " 7474 "0x%08x, ignoring.\n", *board_id); 7475 return -ENODEV; 7476 } 7477 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */ 7478 } 7479 7480 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev, 7481 unsigned long *memory_bar) 7482 { 7483 int i; 7484 7485 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) 7486 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 7487 /* addressing mode bits already removed */ 7488 *memory_bar = pci_resource_start(pdev, i); 7489 dev_dbg(&pdev->dev, "memory BAR = %lx\n", 7490 *memory_bar); 7491 return 0; 7492 } 7493 dev_warn(&pdev->dev, "no memory BAR found\n"); 7494 return -ENODEV; 7495 } 7496 7497 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, 7498 int wait_for_ready) 7499 { 7500 int i, iterations; 7501 u32 scratchpad; 7502 if (wait_for_ready) 7503 iterations = HPSA_BOARD_READY_ITERATIONS; 7504 else 7505 iterations = HPSA_BOARD_NOT_READY_ITERATIONS; 7506 7507 for (i = 0; i < iterations; i++) { 7508 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); 7509 if (wait_for_ready) { 7510 if (scratchpad == HPSA_FIRMWARE_READY) 7511 return 0; 7512 } else { 7513 if (scratchpad != HPSA_FIRMWARE_READY) 7514 return 
0;
		}
		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	dev_warn(&pdev->dev, "board not ready, timed out.\n");
	return -ENODEV;
}

static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
			       u32 *cfg_base_addr, u64 *cfg_base_addr_index,
			       u64 *cfg_offset)
{
	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
	*cfg_base_addr &= (u32) 0x0000ffff;
	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
	if (*cfg_base_addr_index == -1) {
		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
		return -ENODEV;
	}
	return 0;
}

static void hpsa_free_cfgtables(struct ctlr_info *h)
{
	if (h->transtable) {
		iounmap(h->transtable);
		h->transtable = NULL;
	}
	if (h->cfgtable) {
		iounmap(h->cfgtable);
		h->cfgtable = NULL;
	}
}

/* Find and map CISS config table and transfer table
 * several items must be unmapped (freed) later
 */
static int hpsa_find_cfgtables(struct ctlr_info *h)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	u32 trans_offset;
	int rc;

	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
				 &cfg_base_addr_index, &cfg_offset);
	if (rc)
		return rc;
	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
			cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
	if (!h->cfgtable) {
		dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
		return -ENOMEM;
	}
	rc = write_driver_ver_to_cfgtable(h->cfgtable);
	if (rc)
		return rc;
	/* Find performant mode table. */
	trans_offset = readl(&h->cfgtable->TransMethodOffset);
	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
			cfg_base_addr_index) + cfg_offset + trans_offset,
			sizeof(*h->transtable));
	if (!h->transtable) {
		dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
		hpsa_free_cfgtables(h);
		return -ENOMEM;
	}
	return 0;
}

static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
#define MIN_MAX_COMMANDS 16
	BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);

	h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);

	/* Limit commands in memory limited kdump scenario. */
	if (reset_devices && h->max_commands > 32)
		h->max_commands = 32;

	if (h->max_commands < MIN_MAX_COMMANDS) {
		dev_warn(&h->pdev->dev,
			"Controller reports max supported commands of %d. Using %d instead. Ensure that firmware is up to date.\n",
			h->max_commands,
			MIN_MAX_COMMANDS);
		h->max_commands = MIN_MAX_COMMANDS;
	}
}

/* If the controller reports that the total max sg entries is greater than 512,
 * then we know that chained SG blocks work.  (Original smart arrays did not
 * support chained SG blocks and would return zero for max sg entries.)
 */
static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
{
	return h->maxsgentries > 512;
}

/* Interrogate the hardware for some limits:
 * max commands, max SG elements without chaining, and with chaining,
 * SG chain block size, etc.
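 *
 * Worked example with illustrative numbers: if the controller reports
 * MaxScatterGatherElements == 1024, chaining is taken as supported below,
 * so we embed at most 32 SG entries in the command itself, set
 * chainsize = 1024 - 32 = 992 entries for the chain block, and reserve
 * one embedded slot for the chain pointer (maxsgentries becomes 1023).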
 */
static void hpsa_find_board_params(struct ctlr_info *h)
{
	hpsa_get_max_perf_mode_cmds(h);
	h->nr_cmds = h->max_commands;
	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
	h->fw_support = readl(&(h->cfgtable->misc_fw_support));
	if (hpsa_supports_chained_sg_blocks(h)) {
		/* Limit in-command s/g elements to 32 to save dma'able memory. */
		h->max_cmd_sg_entries = 32;
		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
		h->maxsgentries--; /* save one for chain pointer */
	} else {
		/*
		 * Original smart arrays supported at most 31 s/g entries
		 * embedded inline in the command (trying to use more
		 * would lock up the controller)
		 */
		h->max_cmd_sg_entries = 31;
		h->maxsgentries = 31; /* default to traditional values */
		h->chainsize = 0;
	}

	/* Find out what task management functions are supported and cache them */
	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
	if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
	if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
}

static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
	if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
		dev_err(&h->pdev->dev, "not a valid CISS config table\n");
		return false;
	}
	return true;
}

static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
{
	u32 driver_support;

	driver_support = readl(&(h->cfgtable->driver_support));
	/* Need to enable prefetch in the SCSI core for 6400 in x86 */
#ifdef CONFIG_X86
	driver_support |= ENABLE_SCSI_PREFETCH;
#endif
	driver_support |= ENABLE_UNIT_ATTN;
	writel(driver_support, &(h->cfgtable->driver_support));
}

/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
	u32 dma_prefetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}

static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* wait until the clear_event_notify bit 6 is cleared by controller. */
	for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
			goto done;
		/* delay and try again */
		msleep(CLEAR_EVENT_WAIT_INTERVAL);
	}
	return -ENODEV;
done:
	return 0;
}

static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* under certain very rare conditions, this can take a while.
	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
	 * as we enter this code.)
7716 */ 7717 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) { 7718 if (h->remove_in_progress) 7719 goto done; 7720 spin_lock_irqsave(&h->lock, flags); 7721 doorbell_value = readl(h->vaddr + SA5_DOORBELL); 7722 spin_unlock_irqrestore(&h->lock, flags); 7723 if (!(doorbell_value & CFGTBL_ChangeReq)) 7724 goto done; 7725 /* delay and try again */ 7726 msleep(MODE_CHANGE_WAIT_INTERVAL); 7727 } 7728 return -ENODEV; 7729 done: 7730 return 0; 7731 } 7732 7733 /* return -ENODEV or other reason on error, 0 on success */ 7734 static int hpsa_enter_simple_mode(struct ctlr_info *h) 7735 { 7736 u32 trans_support; 7737 7738 trans_support = readl(&(h->cfgtable->TransportSupport)); 7739 if (!(trans_support & SIMPLE_MODE)) 7740 return -ENOTSUPP; 7741 7742 h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); 7743 7744 /* Update the field, and then ring the doorbell */ 7745 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); 7746 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); 7747 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 7748 if (hpsa_wait_for_mode_change_ack(h)) 7749 goto error; 7750 print_cfg_table(&h->pdev->dev, h->cfgtable); 7751 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) 7752 goto error; 7753 h->transMethod = CFGTBL_Trans_Simple; 7754 return 0; 7755 error: 7756 dev_err(&h->pdev->dev, "failed to enter simple mode\n"); 7757 return -ENODEV; 7758 } 7759 7760 /* free items allocated or mapped by hpsa_pci_init */ 7761 static void hpsa_free_pci_init(struct ctlr_info *h) 7762 { 7763 hpsa_free_cfgtables(h); /* pci_init 4 */ 7764 iounmap(h->vaddr); /* pci_init 3 */ 7765 h->vaddr = NULL; 7766 hpsa_disable_interrupt_mode(h); /* pci_init 2 */ 7767 /* 7768 * call pci_disable_device before pci_release_regions per 7769 * Documentation/PCI/pci.txt 7770 */ 7771 pci_disable_device(h->pdev); /* pci_init 1 */ 7772 pci_release_regions(h->pdev); /* pci_init 2 */ 7773 } 7774 7775 /* several items must be freed later */ 7776 static int hpsa_pci_init(struct ctlr_info *h) 7777 { 7778 int prod_index, err; 7779 7780 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); 7781 if (prod_index < 0) 7782 return prod_index; 7783 h->product_name = products[prod_index].product_name; 7784 h->access = *(products[prod_index].access); 7785 7786 h->needs_abort_tags_swizzled = 7787 ctlr_needs_abort_tags_swizzled(h->board_id); 7788 7789 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | 7790 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); 7791 7792 err = pci_enable_device(h->pdev); 7793 if (err) { 7794 dev_err(&h->pdev->dev, "failed to enable PCI device\n"); 7795 pci_disable_device(h->pdev); 7796 return err; 7797 } 7798 7799 err = pci_request_regions(h->pdev, HPSA); 7800 if (err) { 7801 dev_err(&h->pdev->dev, 7802 "failed to obtain PCI resources\n"); 7803 pci_disable_device(h->pdev); 7804 return err; 7805 } 7806 7807 pci_set_master(h->pdev); 7808 7809 hpsa_interrupt_mode(h); 7810 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); 7811 if (err) 7812 goto clean2; /* intmode+region, pci */ 7813 h->vaddr = remap_pci_mem(h->paddr, 0x250); 7814 if (!h->vaddr) { 7815 dev_err(&h->pdev->dev, "failed to remap PCI mem\n"); 7816 err = -ENOMEM; 7817 goto clean2; /* intmode+region, pci */ 7818 } 7819 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); 7820 if (err) 7821 goto clean3; /* vaddr, intmode+region, pci */ 7822 err = hpsa_find_cfgtables(h); 7823 if (err) 7824 goto clean3; /* vaddr, intmode+region, pci */ 7825 hpsa_find_board_params(h); 7826 7827 if 
(!hpsa_CISS_signature_present(h)) {
		err = -ENODEV;
		goto clean4;	/* cfgtables, vaddr, intmode+region, pci */
	}
	hpsa_set_driver_support_bits(h);
	hpsa_p600_dma_prefetch_quirk(h);
	err = hpsa_enter_simple_mode(h);
	if (err)
		goto clean4;	/* cfgtables, vaddr, intmode+region, pci */
	return 0;

clean4:	/* cfgtables, vaddr, intmode+region, pci */
	hpsa_free_cfgtables(h);
clean3:	/* vaddr, intmode+region, pci */
	iounmap(h->vaddr);
	h->vaddr = NULL;
clean2:	/* intmode+region, pci */
	hpsa_disable_interrupt_mode(h);
	/*
	 * call pci_disable_device before pci_release_regions per
	 * Documentation/PCI/pci.txt
	 */
	pci_disable_device(h->pdev);
	pci_release_regions(h->pdev);
	return err;
}

static void hpsa_hba_inquiry(struct ctlr_info *h)
{
	int rc;

#define HBA_INQUIRY_BYTE_COUNT 64
	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
	if (!h->hba_inquiry_data)
		return;
	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
	if (rc != 0) {
		kfree(h->hba_inquiry_data);
		h->hba_inquiry_data = NULL;
	}
}

static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
{
	int rc, i;
	void __iomem *vaddr;

	if (!reset_devices)
		return 0;

	/* The kdump kernel is loading; we don't know what state the
	 * PCI interface is in.  dev->enable_cnt is zero, so we call
	 * enable+disable, wait a while, and switch it on.
	 */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "Failed to enable PCI device\n");
		return -ENODEV;
	}
	pci_disable_device(pdev);
	msleep(260);	/* a randomly chosen number */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "failed to enable device.\n");
		return -ENODEV;
	}

	pci_set_master(pdev);

	vaddr = pci_ioremap_bar(pdev, 0);
	if (vaddr == NULL) {
		rc = -ENOMEM;
		goto out_disable;
	}
	writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	iounmap(vaddr);

	/* Reset the controller with a PCI power-cycle or via doorbell */
	rc = hpsa_kdump_hard_reset_controller(pdev, board_id);

	/* -ENOTSUPP here means we cannot reset the controller
	 * but it's already (and still) up and running in
	 * "performant mode".  Or, it might be 640x, which can't reset
	 * due to concerns about shared bbwc between 6402/6404 pair.
	 */
	if (rc)
		goto out_disable;

	/* Now try to get the controller to respond to a no-op */
	dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			break;
		else
			dev_warn(&pdev->dev, "no-op failed%s\n",
					(i < 11 ?
"; re-trying" : "")); 7924 } 7925 7926 out_disable: 7927 7928 pci_disable_device(pdev); 7929 return rc; 7930 } 7931 7932 static void hpsa_free_cmd_pool(struct ctlr_info *h) 7933 { 7934 kfree(h->cmd_pool_bits); 7935 h->cmd_pool_bits = NULL; 7936 if (h->cmd_pool) { 7937 pci_free_consistent(h->pdev, 7938 h->nr_cmds * sizeof(struct CommandList), 7939 h->cmd_pool, 7940 h->cmd_pool_dhandle); 7941 h->cmd_pool = NULL; 7942 h->cmd_pool_dhandle = 0; 7943 } 7944 if (h->errinfo_pool) { 7945 pci_free_consistent(h->pdev, 7946 h->nr_cmds * sizeof(struct ErrorInfo), 7947 h->errinfo_pool, 7948 h->errinfo_pool_dhandle); 7949 h->errinfo_pool = NULL; 7950 h->errinfo_pool_dhandle = 0; 7951 } 7952 } 7953 7954 static int hpsa_alloc_cmd_pool(struct ctlr_info *h) 7955 { 7956 h->cmd_pool_bits = kzalloc( 7957 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) * 7958 sizeof(unsigned long), GFP_KERNEL); 7959 h->cmd_pool = pci_alloc_consistent(h->pdev, 7960 h->nr_cmds * sizeof(*h->cmd_pool), 7961 &(h->cmd_pool_dhandle)); 7962 h->errinfo_pool = pci_alloc_consistent(h->pdev, 7963 h->nr_cmds * sizeof(*h->errinfo_pool), 7964 &(h->errinfo_pool_dhandle)); 7965 if ((h->cmd_pool_bits == NULL) 7966 || (h->cmd_pool == NULL) 7967 || (h->errinfo_pool == NULL)) { 7968 dev_err(&h->pdev->dev, "out of memory in %s", __func__); 7969 goto clean_up; 7970 } 7971 hpsa_preinitialize_commands(h); 7972 return 0; 7973 clean_up: 7974 hpsa_free_cmd_pool(h); 7975 return -ENOMEM; 7976 } 7977 7978 static void hpsa_irq_affinity_hints(struct ctlr_info *h) 7979 { 7980 int i, cpu; 7981 7982 cpu = cpumask_first(cpu_online_mask); 7983 for (i = 0; i < h->msix_vector; i++) { 7984 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu)); 7985 cpu = cpumask_next(cpu, cpu_online_mask); 7986 } 7987 } 7988 7989 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */ 7990 static void hpsa_free_irqs(struct ctlr_info *h) 7991 { 7992 int i; 7993 7994 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) { 7995 /* Single reply queue, only one irq to free */ 7996 i = h->intr_mode; 7997 irq_set_affinity_hint(h->intr[i], NULL); 7998 free_irq(h->intr[i], &h->q[i]); 7999 h->q[i] = 0; 8000 return; 8001 } 8002 8003 for (i = 0; i < h->msix_vector; i++) { 8004 irq_set_affinity_hint(h->intr[i], NULL); 8005 free_irq(h->intr[i], &h->q[i]); 8006 h->q[i] = 0; 8007 } 8008 for (; i < MAX_REPLY_QUEUES; i++) 8009 h->q[i] = 0; 8010 } 8011 8012 /* returns 0 on success; cleans up and returns -Enn on error */ 8013 static int hpsa_request_irqs(struct ctlr_info *h, 8014 irqreturn_t (*msixhandler)(int, void *), 8015 irqreturn_t (*intxhandler)(int, void *)) 8016 { 8017 int rc, i; 8018 8019 /* 8020 * initialize h->q[x] = x so that interrupt handlers know which 8021 * queue to process. 
8022 */ 8023 for (i = 0; i < MAX_REPLY_QUEUES; i++) 8024 h->q[i] = (u8) i; 8025 8026 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) { 8027 /* If performant mode and MSI-X, use multiple reply queues */ 8028 for (i = 0; i < h->msix_vector; i++) { 8029 sprintf(h->intrname[i], "%s-msix%d", h->devname, i); 8030 rc = request_irq(h->intr[i], msixhandler, 8031 0, h->intrname[i], 8032 &h->q[i]); 8033 if (rc) { 8034 int j; 8035 8036 dev_err(&h->pdev->dev, 8037 "failed to get irq %d for %s\n", 8038 h->intr[i], h->devname); 8039 for (j = 0; j < i; j++) { 8040 free_irq(h->intr[j], &h->q[j]); 8041 h->q[j] = 0; 8042 } 8043 for (; j < MAX_REPLY_QUEUES; j++) 8044 h->q[j] = 0; 8045 return rc; 8046 } 8047 } 8048 hpsa_irq_affinity_hints(h); 8049 } else { 8050 /* Use single reply pool */ 8051 if (h->msix_vector > 0 || h->msi_vector) { 8052 if (h->msix_vector) 8053 sprintf(h->intrname[h->intr_mode], 8054 "%s-msix", h->devname); 8055 else 8056 sprintf(h->intrname[h->intr_mode], 8057 "%s-msi", h->devname); 8058 rc = request_irq(h->intr[h->intr_mode], 8059 msixhandler, 0, 8060 h->intrname[h->intr_mode], 8061 &h->q[h->intr_mode]); 8062 } else { 8063 sprintf(h->intrname[h->intr_mode], 8064 "%s-intx", h->devname); 8065 rc = request_irq(h->intr[h->intr_mode], 8066 intxhandler, IRQF_SHARED, 8067 h->intrname[h->intr_mode], 8068 &h->q[h->intr_mode]); 8069 } 8070 irq_set_affinity_hint(h->intr[h->intr_mode], NULL); 8071 } 8072 if (rc) { 8073 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n", 8074 h->intr[h->intr_mode], h->devname); 8075 hpsa_free_irqs(h); 8076 return -ENODEV; 8077 } 8078 return 0; 8079 } 8080 8081 static int hpsa_kdump_soft_reset(struct ctlr_info *h) 8082 { 8083 int rc; 8084 hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER); 8085 8086 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); 8087 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY); 8088 if (rc) { 8089 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); 8090 return rc; 8091 } 8092 8093 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); 8094 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); 8095 if (rc) { 8096 dev_warn(&h->pdev->dev, "Board failed to become ready " 8097 "after soft reset.\n"); 8098 return rc; 8099 } 8100 8101 return 0; 8102 } 8103 8104 static void hpsa_free_reply_queues(struct ctlr_info *h) 8105 { 8106 int i; 8107 8108 for (i = 0; i < h->nreply_queues; i++) { 8109 if (!h->reply_queue[i].head) 8110 continue; 8111 pci_free_consistent(h->pdev, 8112 h->reply_queue_size, 8113 h->reply_queue[i].head, 8114 h->reply_queue[i].busaddr); 8115 h->reply_queue[i].head = NULL; 8116 h->reply_queue[i].busaddr = 0; 8117 } 8118 h->reply_queue_size = 0; 8119 } 8120 8121 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) 8122 { 8123 hpsa_free_performant_mode(h); /* init_one 7 */ 8124 hpsa_free_sg_chain_blocks(h); /* init_one 6 */ 8125 hpsa_free_cmd_pool(h); /* init_one 5 */ 8126 hpsa_free_irqs(h); /* init_one 4 */ 8127 scsi_host_put(h->scsi_host); /* init_one 3 */ 8128 h->scsi_host = NULL; /* init_one 3 */ 8129 hpsa_free_pci_init(h); /* init_one 2_5 */ 8130 free_percpu(h->lockup_detected); /* init_one 2 */ 8131 h->lockup_detected = NULL; /* init_one 2 */ 8132 if (h->resubmit_wq) { 8133 destroy_workqueue(h->resubmit_wq); /* init_one 1 */ 8134 h->resubmit_wq = NULL; 8135 } 8136 if (h->rescan_ctlr_wq) { 8137 destroy_workqueue(h->rescan_ctlr_wq); 8138 h->rescan_ctlr_wq = NULL; 8139 } 8140 kfree(h); /* init_one 1 */ 8141 } 8142 8143 /* 
Called when controller lockup detected. */ 8144 static void fail_all_outstanding_cmds(struct ctlr_info *h) 8145 { 8146 int i, refcount; 8147 struct CommandList *c; 8148 int failcount = 0; 8149 8150 flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */ 8151 for (i = 0; i < h->nr_cmds; i++) { 8152 c = h->cmd_pool + i; 8153 refcount = atomic_inc_return(&c->refcount); 8154 if (refcount > 1) { 8155 c->err_info->CommandStatus = CMD_CTLR_LOCKUP; 8156 finish_cmd(c); 8157 atomic_dec(&h->commands_outstanding); 8158 failcount++; 8159 } 8160 cmd_free(h, c); 8161 } 8162 dev_warn(&h->pdev->dev, 8163 "failed %d commands in fail_all\n", failcount); 8164 } 8165 8166 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value) 8167 { 8168 int cpu; 8169 8170 for_each_online_cpu(cpu) { 8171 u32 *lockup_detected; 8172 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); 8173 *lockup_detected = value; 8174 } 8175 wmb(); /* be sure the per-cpu variables are out to memory */ 8176 } 8177 8178 static void controller_lockup_detected(struct ctlr_info *h) 8179 { 8180 unsigned long flags; 8181 u32 lockup_detected; 8182 8183 h->access.set_intr_mask(h, HPSA_INTR_OFF); 8184 spin_lock_irqsave(&h->lock, flags); 8185 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); 8186 if (!lockup_detected) { 8187 /* no heartbeat, but controller gave us a zero. */ 8188 dev_warn(&h->pdev->dev, 8189 "lockup detected after %d but scratchpad register is zero\n", 8190 h->heartbeat_sample_interval / HZ); 8191 lockup_detected = 0xffffffff; 8192 } 8193 set_lockup_detected_for_all_cpus(h, lockup_detected); 8194 spin_unlock_irqrestore(&h->lock, flags); 8195 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n", 8196 lockup_detected, h->heartbeat_sample_interval / HZ); 8197 pci_disable_device(h->pdev); 8198 fail_all_outstanding_cmds(h); 8199 } 8200 8201 static int detect_controller_lockup(struct ctlr_info *h) 8202 { 8203 u64 now; 8204 u32 heartbeat; 8205 unsigned long flags; 8206 8207 now = get_jiffies_64(); 8208 /* If we've received an interrupt recently, we're ok. */ 8209 if (time_after64(h->last_intr_timestamp + 8210 (h->heartbeat_sample_interval), now)) 8211 return false; 8212 8213 /* 8214 * If we've already checked the heartbeat recently, we're ok. 8215 * This could happen if someone sends us a signal. We 8216 * otherwise don't care about signals in this thread. 8217 */ 8218 if (time_after64(h->last_heartbeat_timestamp + 8219 (h->heartbeat_sample_interval), now)) 8220 return false; 8221 8222 /* If heartbeat has not changed since we last looked, we're not ok. */ 8223 spin_lock_irqsave(&h->lock, flags); 8224 heartbeat = readl(&h->cfgtable->HeartBeat); 8225 spin_unlock_irqrestore(&h->lock, flags); 8226 if (h->last_heartbeat == heartbeat) { 8227 controller_lockup_detected(h); 8228 return true; 8229 } 8230 8231 /* We're ok. */ 8232 h->last_heartbeat = heartbeat; 8233 h->last_heartbeat_timestamp = now; 8234 return false; 8235 } 8236 8237 static void hpsa_ack_ctlr_events(struct ctlr_info *h) 8238 { 8239 int i; 8240 char *event_type; 8241 8242 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) 8243 return; 8244 8245 /* Ask the controller to clear the events we're handling. 
*/ 8246 if ((h->transMethod & (CFGTBL_Trans_io_accel1 8247 | CFGTBL_Trans_io_accel2)) && 8248 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE || 8249 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) { 8250 8251 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE) 8252 event_type = "state change"; 8253 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE) 8254 event_type = "configuration change"; 8255 /* Stop sending new RAID offload reqs via the IO accelerator */ 8256 scsi_block_requests(h->scsi_host); 8257 for (i = 0; i < h->ndevices; i++) 8258 h->dev[i]->offload_enabled = 0; 8259 hpsa_drain_accel_commands(h); 8260 /* Set 'accelerator path config change' bit */ 8261 dev_warn(&h->pdev->dev, 8262 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n", 8263 h->events, event_type); 8264 writel(h->events, &(h->cfgtable->clear_event_notify)); 8265 /* Set the "clear event notify field update" bit 6 */ 8266 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); 8267 /* Wait until ctlr clears 'clear event notify field', bit 6 */ 8268 hpsa_wait_for_clear_event_notify_ack(h); 8269 scsi_unblock_requests(h->scsi_host); 8270 } else { 8271 /* Acknowledge controller notification events. */ 8272 writel(h->events, &(h->cfgtable->clear_event_notify)); 8273 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); 8274 hpsa_wait_for_clear_event_notify_ack(h); 8275 #if 0 8276 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 8277 hpsa_wait_for_mode_change_ack(h); 8278 #endif 8279 } 8280 return; 8281 } 8282 8283 /* Check a register on the controller to see if there are configuration 8284 * changes (added/changed/removed logical drives, etc.) which mean that 8285 * we should rescan the controller for devices. 8286 * Also check flag for driver-initiated rescan. 8287 */ 8288 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h) 8289 { 8290 if (h->drv_req_rescan) { 8291 h->drv_req_rescan = 0; 8292 return 1; 8293 } 8294 8295 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) 8296 return 0; 8297 8298 h->events = readl(&(h->cfgtable->event_notify)); 8299 return h->events & RESCAN_REQUIRED_EVENT_BITS; 8300 } 8301 8302 /* 8303 * Check if any of the offline devices have become ready 8304 */ 8305 static int hpsa_offline_devices_ready(struct ctlr_info *h) 8306 { 8307 unsigned long flags; 8308 struct offline_device_entry *d; 8309 struct list_head *this, *tmp; 8310 8311 spin_lock_irqsave(&h->offline_device_lock, flags); 8312 list_for_each_safe(this, tmp, &h->offline_device_list) { 8313 d = list_entry(this, struct offline_device_entry, 8314 offline_list); 8315 spin_unlock_irqrestore(&h->offline_device_lock, flags); 8316 if (!hpsa_volume_offline(h, d->scsi3addr)) { 8317 spin_lock_irqsave(&h->offline_device_lock, flags); 8318 list_del(&d->offline_list); 8319 spin_unlock_irqrestore(&h->offline_device_lock, flags); 8320 return 1; 8321 } 8322 spin_lock_irqsave(&h->offline_device_lock, flags); 8323 } 8324 spin_unlock_irqrestore(&h->offline_device_lock, flags); 8325 return 0; 8326 } 8327 8328 static int hpsa_luns_changed(struct ctlr_info *h) 8329 { 8330 int rc = 1; /* assume there are changes */ 8331 struct ReportLUNdata *logdev = NULL; 8332 8333 /* if we can't find out if lun data has changed, 8334 * assume that it has. 
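	 *
	 * (The comparison below is a plain memcmp() of a freshly fetched
	 * report-luns buffer against the cached copy in h->lastlogicals;
	 * any byte difference, including the LUN-list length in the header,
	 * counts as a change and triggers a rescan.)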
8335 */ 8336 8337 if (!h->lastlogicals) 8338 goto out; 8339 8340 logdev = kzalloc(sizeof(*logdev), GFP_KERNEL); 8341 if (!logdev) { 8342 dev_warn(&h->pdev->dev, 8343 "Out of memory, can't track lun changes.\n"); 8344 goto out; 8345 } 8346 if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) { 8347 dev_warn(&h->pdev->dev, 8348 "report luns failed, can't track lun changes.\n"); 8349 goto out; 8350 } 8351 if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) { 8352 dev_info(&h->pdev->dev, 8353 "Lun changes detected.\n"); 8354 memcpy(h->lastlogicals, logdev, sizeof(*logdev)); 8355 goto out; 8356 } else 8357 rc = 0; /* no changes detected. */ 8358 out: 8359 kfree(logdev); 8360 return rc; 8361 } 8362 8363 static void hpsa_rescan_ctlr_worker(struct work_struct *work) 8364 { 8365 unsigned long flags; 8366 struct ctlr_info *h = container_of(to_delayed_work(work), 8367 struct ctlr_info, rescan_ctlr_work); 8368 8369 8370 if (h->remove_in_progress) 8371 return; 8372 8373 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { 8374 scsi_host_get(h->scsi_host); 8375 hpsa_ack_ctlr_events(h); 8376 hpsa_scan_start(h->scsi_host); 8377 scsi_host_put(h->scsi_host); 8378 } else if (h->discovery_polling) { 8379 hpsa_disable_rld_caching(h); 8380 if (hpsa_luns_changed(h)) { 8381 struct Scsi_Host *sh = NULL; 8382 8383 dev_info(&h->pdev->dev, 8384 "driver discovery polling rescan.\n"); 8385 sh = scsi_host_get(h->scsi_host); 8386 if (sh != NULL) { 8387 hpsa_scan_start(sh); 8388 scsi_host_put(sh); 8389 } 8390 } 8391 } 8392 spin_lock_irqsave(&h->lock, flags); 8393 if (!h->remove_in_progress) 8394 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, 8395 h->heartbeat_sample_interval); 8396 spin_unlock_irqrestore(&h->lock, flags); 8397 } 8398 8399 static void hpsa_monitor_ctlr_worker(struct work_struct *work) 8400 { 8401 unsigned long flags; 8402 struct ctlr_info *h = container_of(to_delayed_work(work), 8403 struct ctlr_info, monitor_ctlr_work); 8404 8405 detect_controller_lockup(h); 8406 if (lockup_detected(h)) 8407 return; 8408 8409 spin_lock_irqsave(&h->lock, flags); 8410 if (!h->remove_in_progress) 8411 schedule_delayed_work(&h->monitor_ctlr_work, 8412 h->heartbeat_sample_interval); 8413 spin_unlock_irqrestore(&h->lock, flags); 8414 } 8415 8416 static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h, 8417 char *name) 8418 { 8419 struct workqueue_struct *wq = NULL; 8420 8421 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr); 8422 if (!wq) 8423 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name); 8424 8425 return wq; 8426 } 8427 8428 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 8429 { 8430 int dac, rc; 8431 struct ctlr_info *h; 8432 int try_soft_reset = 0; 8433 unsigned long flags; 8434 u32 board_id; 8435 8436 if (number_of_controllers == 0) 8437 printk(KERN_INFO DRIVER_NAME "\n"); 8438 8439 rc = hpsa_lookup_board_id(pdev, &board_id); 8440 if (rc < 0) { 8441 dev_warn(&pdev->dev, "Board ID not found\n"); 8442 return rc; 8443 } 8444 8445 rc = hpsa_init_reset_devices(pdev, board_id); 8446 if (rc) { 8447 if (rc != -ENOTSUPP) 8448 return rc; 8449 /* If the reset fails in a particular way (it has no way to do 8450 * a proper hard reset, so returns -ENOTSUPP) we can try to do 8451 * a soft reset once we get the controller configured up to the 8452 * point that it can accept a command. 
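		 *
		 * For orientation, the reset ladder: hpsa_kdump_hard_reset_controller()
		 * tries the newer doorbell reset, else the PCI power-state
		 * reset; boards advertising only the older doorbell method
		 * (or that are only soft-resettable) yield -ENOTSUPP, which
		 * lands us here: bring the controller up far enough to accept
		 * commands, soft reset it, then redo the whole initialization.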
		 */
		try_soft_reset = 1;
		rc = 0;
	}

reinit_after_soft_reset:

	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h) {
		dev_err(&pdev->dev, "Failed to allocate controller head\n");
		return -ENOMEM;
	}

	h->pdev = pdev;

	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->offline_device_list);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->offline_device_lock);
	spin_lock_init(&h->scan_lock);
	atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
	atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);

	/* Allocate and clear per-cpu variable lockup_detected */
	h->lockup_detected = alloc_percpu(u32);
	if (!h->lockup_detected) {
		dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
		rc = -ENOMEM;
		goto clean1;	/* aer/h */
	}
	set_lockup_detected_for_all_cpus(h, 0);

	rc = hpsa_pci_init(h);
	if (rc)
		goto clean2;	/* lu, aer/h */

	/* relies on h-> settings made by hpsa_pci_init, including
	 * interrupt_mode h->intr */
	rc = hpsa_scsi_host_alloc(h);
	if (rc)
		goto clean2_5;	/* pci, lu, aer/h */

	sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc == 0) {
		dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc == 0) {
			dac = 0;
		} else {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean3;	/* shost, pci, lu, aer/h */
		}
	}

	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
	if (rc)
		goto clean3;	/* shost, pci, lu, aer/h */
	rc = hpsa_alloc_cmd_pool(h);
	if (rc)
		goto clean4;	/* irq, shost, pci, lu, aer/h */
	rc = hpsa_alloc_sg_chain_blocks(h);
	if (rc)
		goto clean5;	/* cmd, irq, shost, pci, lu, aer/h */
	init_waitqueue_head(&h->scan_wait_queue);
	init_waitqueue_head(&h->abort_cmd_wait_queue);
	init_waitqueue_head(&h->event_sync_wait_queue);
	mutex_init(&h->reset_mutex);
	h->scan_finished = 1;	/* no scan currently in progress */

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;

	spin_lock_init(&h->devlock);
	rc = hpsa_put_ctlr_into_performant_mode(h);
	if (rc)
		goto clean6;	/* sg, cmd, irq, shost, pci, lu, aer/h */

	/* hook into SCSI subsystem */
	rc = hpsa_scsi_add_host(h);
	if (rc)
		goto clean7;	/* perf, sg, cmd, irq, shost, pci, lu, aer/h */

	/* create the rescan and resubmit workqueues */
	h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
	if (!h->rescan_ctlr_wq) {
		rc = -ENOMEM;
		goto clean7;
	}

	h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
	if (!h->resubmit_wq) {
		rc = -ENOMEM;
		goto clean7;	/* aer/h */
	}

	/*
	 * At this point, the controller is ready to take commands.
8564 * Now, if reset_devices and the hard reset didn't work, try 8565 * the soft reset and see if that works. 8566 */ 8567 if (try_soft_reset) { 8568 8569 /* This is kind of gross. We may or may not get a completion 8570 * from the soft reset command, and if we do, then the value 8571 * from the fifo may or may not be valid. So, we wait 10 secs 8572 * after the reset throwing away any completions we get during 8573 * that time. Unregister the interrupt handler and register 8574 * fake ones to scoop up any residual completions. 8575 */ 8576 spin_lock_irqsave(&h->lock, flags); 8577 h->access.set_intr_mask(h, HPSA_INTR_OFF); 8578 spin_unlock_irqrestore(&h->lock, flags); 8579 hpsa_free_irqs(h); 8580 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions, 8581 hpsa_intx_discard_completions); 8582 if (rc) { 8583 dev_warn(&h->pdev->dev, 8584 "Failed to request_irq after soft reset.\n"); 8585 /* 8586 * cannot goto clean7 or free_irqs will be called 8587 * again. Instead, do its work 8588 */ 8589 hpsa_free_performant_mode(h); /* clean7 */ 8590 hpsa_free_sg_chain_blocks(h); /* clean6 */ 8591 hpsa_free_cmd_pool(h); /* clean5 */ 8592 /* 8593 * skip hpsa_free_irqs(h) clean4 since that 8594 * was just called before request_irqs failed 8595 */ 8596 goto clean3; 8597 } 8598 8599 rc = hpsa_kdump_soft_reset(h); 8600 if (rc) 8601 /* Neither hard nor soft reset worked, we're hosed. */ 8602 goto clean7; 8603 8604 dev_info(&h->pdev->dev, "Board READY.\n"); 8605 dev_info(&h->pdev->dev, 8606 "Waiting for stale completions to drain.\n"); 8607 h->access.set_intr_mask(h, HPSA_INTR_ON); 8608 msleep(10000); 8609 h->access.set_intr_mask(h, HPSA_INTR_OFF); 8610 8611 rc = controller_reset_failed(h->cfgtable); 8612 if (rc) 8613 dev_info(&h->pdev->dev, 8614 "Soft reset appears to have failed.\n"); 8615 8616 /* since the controller's reset, we have to go back and re-init 8617 * everything. Easiest to just forget what we've done and do it 8618 * all over again. 
8619 */ 8620 hpsa_undo_allocations_after_kdump_soft_reset(h); 8621 try_soft_reset = 0; 8622 if (rc) 8623 /* don't goto clean, we already unallocated */ 8624 return -ENODEV; 8625 8626 goto reinit_after_soft_reset; 8627 } 8628 8629 /* Enable Accelerated IO path at driver layer */ 8630 h->acciopath_status = 1; 8631 /* Disable discovery polling.*/ 8632 h->discovery_polling = 0; 8633 8634 8635 /* Turn the interrupts on so we can service requests */ 8636 h->access.set_intr_mask(h, HPSA_INTR_ON); 8637 8638 hpsa_hba_inquiry(h); 8639 8640 h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL); 8641 if (!h->lastlogicals) 8642 dev_info(&h->pdev->dev, 8643 "Can't track change to report lun data\n"); 8644 8645 /* Monitor the controller for firmware lockups */ 8646 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; 8647 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); 8648 schedule_delayed_work(&h->monitor_ctlr_work, 8649 h->heartbeat_sample_interval); 8650 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker); 8651 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, 8652 h->heartbeat_sample_interval); 8653 return 0; 8654 8655 clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ 8656 hpsa_free_performant_mode(h); 8657 h->access.set_intr_mask(h, HPSA_INTR_OFF); 8658 clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */ 8659 hpsa_free_sg_chain_blocks(h); 8660 clean5: /* cmd, irq, shost, pci, lu, aer/h */ 8661 hpsa_free_cmd_pool(h); 8662 clean4: /* irq, shost, pci, lu, aer/h */ 8663 hpsa_free_irqs(h); 8664 clean3: /* shost, pci, lu, aer/h */ 8665 scsi_host_put(h->scsi_host); 8666 h->scsi_host = NULL; 8667 clean2_5: /* pci, lu, aer/h */ 8668 hpsa_free_pci_init(h); 8669 clean2: /* lu, aer/h */ 8670 if (h->lockup_detected) { 8671 free_percpu(h->lockup_detected); 8672 h->lockup_detected = NULL; 8673 } 8674 clean1: /* wq/aer/h */ 8675 if (h->resubmit_wq) { 8676 destroy_workqueue(h->resubmit_wq); 8677 h->resubmit_wq = NULL; 8678 } 8679 if (h->rescan_ctlr_wq) { 8680 destroy_workqueue(h->rescan_ctlr_wq); 8681 h->rescan_ctlr_wq = NULL; 8682 } 8683 kfree(h); 8684 return rc; 8685 } 8686 8687 static void hpsa_flush_cache(struct ctlr_info *h) 8688 { 8689 char *flush_buf; 8690 struct CommandList *c; 8691 int rc; 8692 8693 if (unlikely(lockup_detected(h))) 8694 return; 8695 flush_buf = kzalloc(4, GFP_KERNEL); 8696 if (!flush_buf) 8697 return; 8698 8699 c = cmd_alloc(h); 8700 8701 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, 8702 RAID_CTLR_LUNID, TYPE_CMD)) { 8703 goto out; 8704 } 8705 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 8706 PCI_DMA_TODEVICE, NO_TIMEOUT); 8707 if (rc) 8708 goto out; 8709 if (c->err_info->CommandStatus != 0) 8710 out: 8711 dev_warn(&h->pdev->dev, 8712 "error flushing cache on controller\n"); 8713 cmd_free(h, c); 8714 kfree(flush_buf); 8715 } 8716 8717 /* Make controller gather fresh report lun data each time we 8718 * send down a report luns request 8719 */ 8720 static void hpsa_disable_rld_caching(struct ctlr_info *h) 8721 { 8722 u32 *options; 8723 struct CommandList *c; 8724 int rc; 8725 8726 /* Don't bother trying to set diag options if locked up */ 8727 if (unlikely(h->lockup_detected)) 8728 return; 8729 8730 options = kzalloc(sizeof(*options), GFP_KERNEL); 8731 if (!options) { 8732 dev_err(&h->pdev->dev, 8733 "Error: failed to disable rld caching, during alloc.\n"); 8734 return; 8735 } 8736 8737 c = cmd_alloc(h); 8738 8739 /* first, get the current diag options settings */ 8740 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, 
options, 4, 0, 8741 RAID_CTLR_LUNID, TYPE_CMD)) 8742 goto errout; 8743 8744 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 8745 PCI_DMA_FROMDEVICE, NO_TIMEOUT); 8746 if ((rc != 0) || (c->err_info->CommandStatus != 0)) 8747 goto errout; 8748 8749 /* Now, set the bit for disabling the RLD caching */ 8750 *options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING; 8751 8752 if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0, 8753 RAID_CTLR_LUNID, TYPE_CMD)) 8754 goto errout; 8755 8756 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 8757 PCI_DMA_TODEVICE, NO_TIMEOUT); 8758 if ((rc != 0) || (c->err_info->CommandStatus != 0)) 8759 goto errout; 8760 8761 /* Now verify that it got set: */ 8762 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0, 8763 RAID_CTLR_LUNID, TYPE_CMD)) 8764 goto errout; 8765 8766 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 8767 PCI_DMA_FROMDEVICE, NO_TIMEOUT); 8768 if ((rc != 0) || (c->err_info->CommandStatus != 0)) 8769 goto errout; 8770 8771 if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING) 8772 goto out; 8773 8774 errout: 8775 dev_err(&h->pdev->dev, 8776 "Error: failed to disable report lun data caching.\n"); 8777 out: 8778 cmd_free(h, c); 8779 kfree(options); 8780 } 8781 8782 static void hpsa_shutdown(struct pci_dev *pdev) 8783 { 8784 struct ctlr_info *h; 8785 8786 h = pci_get_drvdata(pdev); 8787 /* Turn board interrupts off and send the flush cache command 8788 * sendcmd will turn off interrupt, and send the flush... 8789 * To write all data in the battery backed cache to disks 8790 */ 8791 hpsa_flush_cache(h); 8792 h->access.set_intr_mask(h, HPSA_INTR_OFF); 8793 hpsa_free_irqs(h); /* init_one 4 */ 8794 hpsa_disable_interrupt_mode(h); /* pci_init 2 */ 8795 } 8796 8797 static void hpsa_free_device_info(struct ctlr_info *h) 8798 { 8799 int i; 8800 8801 for (i = 0; i < h->ndevices; i++) { 8802 kfree(h->dev[i]); 8803 h->dev[i] = NULL; 8804 } 8805 } 8806 8807 static void hpsa_remove_one(struct pci_dev *pdev) 8808 { 8809 struct ctlr_info *h; 8810 unsigned long flags; 8811 8812 if (pci_get_drvdata(pdev) == NULL) { 8813 dev_err(&pdev->dev, "unable to remove device\n"); 8814 return; 8815 } 8816 h = pci_get_drvdata(pdev); 8817 8818 /* Get rid of any controller monitoring work items */ 8819 spin_lock_irqsave(&h->lock, flags); 8820 h->remove_in_progress = 1; 8821 spin_unlock_irqrestore(&h->lock, flags); 8822 cancel_delayed_work_sync(&h->monitor_ctlr_work); 8823 cancel_delayed_work_sync(&h->rescan_ctlr_work); 8824 destroy_workqueue(h->rescan_ctlr_wq); 8825 destroy_workqueue(h->resubmit_wq); 8826 8827 /* 8828 * Call before disabling interrupts. 8829 * scsi_remove_host can trigger I/O operations especially 8830 * when multipath is enabled. There can be SYNCHRONIZE CACHE 8831 * operations which cannot complete and will hang the system. 
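	 *
	 * Hence the teardown order used here: mark remove_in_progress and
	 * cancel the delayed work first, detach from the SCSI midlayer,
	 * quiesce the hardware via hpsa_shutdown(), and only then free
	 * the driver's own resources.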
8832 */ 8833 if (h->scsi_host) 8834 scsi_remove_host(h->scsi_host); /* init_one 8 */ 8835 /* includes hpsa_free_irqs - init_one 4 */ 8836 /* includes hpsa_disable_interrupt_mode - pci_init 2 */ 8837 hpsa_shutdown(pdev); 8838 8839 hpsa_free_device_info(h); /* scan */ 8840 8841 kfree(h->hba_inquiry_data); /* init_one 10 */ 8842 h->hba_inquiry_data = NULL; /* init_one 10 */ 8843 hpsa_free_ioaccel2_sg_chain_blocks(h); 8844 hpsa_free_performant_mode(h); /* init_one 7 */ 8845 hpsa_free_sg_chain_blocks(h); /* init_one 6 */ 8846 hpsa_free_cmd_pool(h); /* init_one 5 */ 8847 kfree(h->lastlogicals); 8848 8849 /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */ 8850 8851 scsi_host_put(h->scsi_host); /* init_one 3 */ 8852 h->scsi_host = NULL; /* init_one 3 */ 8853 8854 /* includes hpsa_disable_interrupt_mode - pci_init 2 */ 8855 hpsa_free_pci_init(h); /* init_one 2.5 */ 8856 8857 free_percpu(h->lockup_detected); /* init_one 2 */ 8858 h->lockup_detected = NULL; /* init_one 2 */ 8859 /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */ 8860 8861 hpsa_delete_sas_host(h); 8862 8863 kfree(h); /* init_one 1 */ 8864 } 8865 8866 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev, 8867 __attribute__((unused)) pm_message_t state) 8868 { 8869 return -ENOSYS; 8870 } 8871 8872 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev) 8873 { 8874 return -ENOSYS; 8875 } 8876 8877 static struct pci_driver hpsa_pci_driver = { 8878 .name = HPSA, 8879 .probe = hpsa_init_one, 8880 .remove = hpsa_remove_one, 8881 .id_table = hpsa_pci_device_id, /* id_table */ 8882 .shutdown = hpsa_shutdown, 8883 .suspend = hpsa_suspend, 8884 .resume = hpsa_resume, 8885 }; 8886 8887 /* Fill in bucket_map[], given nsgs (the max number of 8888 * scatter gather elements supported) and bucket[], 8889 * which is an array of 8 integers. The bucket[] array 8890 * contains 8 different DMA transfer sizes (in 16 8891 * byte increments) which the controller uses to fetch 8892 * commands. This function fills in bucket_map[], which 8893 * maps a given number of scatter gather elements to one of 8894 * the 8 DMA transfer sizes. The point of it is to allow the 8895 * controller to only do as much DMA as needed to fetch the 8896 * command, with the DMA transfer size encoded in the lower 8897 * bits of the command address. 8898 */ 8899 static void calc_bucket_map(int bucket[], int num_buckets, 8900 int nsgs, int min_blocks, u32 *bucket_map) 8901 { 8902 int i, j, b, size; 8903 8904 /* Note, bucket_map must have nsgs+1 entries. */ 8905 for (i = 0; i <= nsgs; i++) { 8906 /* Compute size of a command with i SG entries */ 8907 size = i + min_blocks; 8908 b = num_buckets; /* Assume the biggest bucket */ 8909 /* Find the bucket that is just big enough */ 8910 for (j = 0; j < num_buckets; j++) { 8911 if (bucket[j] >= size) { 8912 b = j; 8913 break; 8914 } 8915 } 8916 /* for a command with i SG entries, use bucket b. 
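		 *
		 * Worked example with the performant-mode values used below
		 * (min_blocks == 4; bucket[] == {5, 6, 8, 10, 12, 20, 28, 36},
		 * the last entry assuming SG_ENTRIES_IN_CMD == 32):
		 *
		 *	i ==  0 -> size  4 -> bucket 0 ->  5 blocks ( 80 bytes fetched)
		 *	i ==  6 -> size 10 -> bucket 3 -> 10 blocks (160 bytes fetched)
		 *	i == 32 -> size 36 -> bucket 7 -> 36 blocks (576 bytes fetched)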
		 */
		bucket_map[i] = b;
	}
}

/*
 * return -ENODEV on err, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
	int i;
	unsigned long register_value;
	unsigned long transMethod = CFGTBL_Trans_Performant |
			(trans_support & CFGTBL_Trans_use_short_tags) |
				CFGTBL_Trans_enable_directed_msix |
			(trans_support & (CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_io_accel2));
	struct access_method access = SA5_performant_access;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to, to tell it the 8 different
	 * command sizes it may encounter.  It's a way of reducing the
	 * DMA done to fetch each command.  Encoded into each command's
	 * tag are 3 bits which communicate to the controller which of
	 * the eight sizes that command fits within.  The size of each
	 * command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are
	 * programmed with the number of 16-byte blocks a command of that
	 * size requires.  The smallest command possible requires 5 such
	 * 16-byte blocks.  The largest command possible requires
	 * SG_ENTRIES_IN_CMD + 4 16-byte blocks.  Note, this only extends
	 * to the SG entries contained within the command block, and does
	 * not extend to chained blocks of SG elements.  bft[] contains
	 * the eight values we write to the registers.  They are not
	 * evenly distributed, but have more sizes for small commands,
	 * and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
#define MIN_IOACCEL2_BFT_ENTRY 5
#define HPSA_IOACCEL2_HEADER_SZ 4
	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
			13, 14, 15, 16, 17, 18, 19,
			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
			16 * MIN_IOACCEL2_BFT_ENTRY);
	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/* 5 = 1 s/g entry or 4k
	 * 6 = 2 s/g entry or 8k
	 * 8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */

	/* If the controller supports either ioaccel method then
	 * we can also use the RAID stack submit path that does not
	 * perform the superfluous readl() after each command submission.
	 */
	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
		access = SA5_performant_access_no_read;

	/* Controller spec: zero out this buffer.
*/ 8979 for (i = 0; i < h->nreply_queues; i++) 8980 memset(h->reply_queue[i].head, 0, h->reply_queue_size); 8981 8982 bft[7] = SG_ENTRIES_IN_CMD + 4; 8983 calc_bucket_map(bft, ARRAY_SIZE(bft), 8984 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable); 8985 for (i = 0; i < 8; i++) 8986 writel(bft[i], &h->transtable->BlockFetch[i]); 8987 8988 /* size of controller ring buffer */ 8989 writel(h->max_commands, &h->transtable->RepQSize); 8990 writel(h->nreply_queues, &h->transtable->RepQCount); 8991 writel(0, &h->transtable->RepQCtrAddrLow32); 8992 writel(0, &h->transtable->RepQCtrAddrHigh32); 8993 8994 for (i = 0; i < h->nreply_queues; i++) { 8995 writel(0, &h->transtable->RepQAddr[i].upper); 8996 writel(h->reply_queue[i].busaddr, 8997 &h->transtable->RepQAddr[i].lower); 8998 } 8999 9000 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); 9001 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest)); 9002 /* 9003 * enable outbound interrupt coalescing in accelerator mode; 9004 */ 9005 if (trans_support & CFGTBL_Trans_io_accel1) { 9006 access = SA5_ioaccel_mode1_access; 9007 writel(10, &h->cfgtable->HostWrite.CoalIntDelay); 9008 writel(4, &h->cfgtable->HostWrite.CoalIntCount); 9009 } else { 9010 if (trans_support & CFGTBL_Trans_io_accel2) { 9011 access = SA5_ioaccel_mode2_access; 9012 writel(10, &h->cfgtable->HostWrite.CoalIntDelay); 9013 writel(4, &h->cfgtable->HostWrite.CoalIntCount); 9014 } 9015 } 9016 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 9017 if (hpsa_wait_for_mode_change_ack(h)) { 9018 dev_err(&h->pdev->dev, 9019 "performant mode problem - doorbell timeout\n"); 9020 return -ENODEV; 9021 } 9022 register_value = readl(&(h->cfgtable->TransportActive)); 9023 if (!(register_value & CFGTBL_Trans_Performant)) { 9024 dev_err(&h->pdev->dev, 9025 "performant mode problem - transport not active\n"); 9026 return -ENODEV; 9027 } 9028 /* Change the access methods to the performant access methods */ 9029 h->access = access; 9030 h->transMethod = transMethod; 9031 9032 if (!((trans_support & CFGTBL_Trans_io_accel1) || 9033 (trans_support & CFGTBL_Trans_io_accel2))) 9034 return 0; 9035 9036 if (trans_support & CFGTBL_Trans_io_accel1) { 9037 /* Set up I/O accelerator mode */ 9038 for (i = 0; i < h->nreply_queues; i++) { 9039 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX); 9040 h->reply_queue[i].current_entry = 9041 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX); 9042 } 9043 bft[7] = h->ioaccel_maxsg + 8; 9044 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8, 9045 h->ioaccel1_blockFetchTable); 9046 9047 /* initialize all reply queue entries to unused */ 9048 for (i = 0; i < h->nreply_queues; i++) 9049 memset(h->reply_queue[i].head, 9050 (u8) IOACCEL_MODE1_REPLY_UNUSED, 9051 h->reply_queue_size); 9052 9053 /* set all the constant fields in the accelerator command 9054 * frames once at init time to save CPU cycles later. 
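	 *
	 * The tag set below encodes the command's pool index, so the
	 * completion path can get back to the command without searching;
	 * roughly (a sketch, the driver uses its tag helpers for this):
	 *
	 *	c = h->cmd_pool + (tag >> DIRECT_LOOKUP_SHIFT);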
		 */
		for (i = 0; i < h->nr_cmds; i++) {
			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];

			cp->function = IOACCEL1_FUNCTION_SCSIIO;
			cp->err_info = (u32) (h->errinfo_pool_dhandle +
					(i * sizeof(struct ErrorInfo)));
			cp->err_info_len = sizeof(struct ErrorInfo);
			cp->sgl_offset = IOACCEL1_SGLOFFSET;
			cp->host_context_flags =
				cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
			cp->timeout_sec = 0;
			cp->ReplyQueue = 0;
			cp->tag =
				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
			cp->host_addr =
				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
					(i * sizeof(struct io_accel1_cmd)));
		}
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		u64 cfg_offset, cfg_base_addr_index;
		u32 bft2_offset, cfg_base_addr;
		int rc;

		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
					 &cfg_base_addr_index, &cfg_offset);
		if (rc)
			return rc;
		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
				4, h->ioaccel2_blockFetchTable);
		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
		BUILD_BUG_ON(offsetof(struct CfgTable,
				io_accel_request_size_offset) != 0xb8);
		h->ioaccel2_bft2_regs =
			remap_pci_mem(pci_resource_start(h->pdev,
					cfg_base_addr_index) +
					cfg_offset + bft2_offset,
					ARRAY_SIZE(bft2) *
					sizeof(*h->ioaccel2_bft2_regs));
		for (i = 0; i < ARRAY_SIZE(bft2); i++)
			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - enabling ioaccel mode\n");
		return -ENODEV;
	}
	return 0;
}

/* Free ioaccel1 mode command blocks and block fetch table */
static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	if (h->ioaccel_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			h->ioaccel_cmd_pool,
			h->ioaccel_cmd_pool_dhandle);
		h->ioaccel_cmd_pool = NULL;
		h->ioaccel_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel1_blockFetchTable);
	h->ioaccel1_blockFetchTable = NULL;
}

/* Allocate ioaccel1 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/* Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
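	 *
	 * That is, IOACCEL1_COMMANDLIST_ALIGNMENT is 128 (2^7), leaving the
	 * low 7 bits of a properly aligned command address clear for the
	 * hardware to reuse, the same trick as the 32-byte/5-bit alignment
	 * rule for struct CommandList above.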
/* Allocate ioaccel1 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/*
	 * Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
	h->ioaccel_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			&(h->ioaccel_cmd_pool_dhandle));

	h->ioaccel1_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel_cmd_pool == NULL) ||
		(h->ioaccel1_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel1_cmd_and_bft(h);
	return -ENOMEM;
}

/* Free ioaccel2 mode command blocks and block fetch table */
static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	hpsa_free_ioaccel2_sg_chain_blocks(h);

	if (h->ioaccel2_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool,
			h->ioaccel2_cmd_pool_dhandle);
		h->ioaccel2_cmd_pool = NULL;
		h->ioaccel2_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel2_blockFetchTable);
	h->ioaccel2_blockFetchTable = NULL;
}

/* Allocate ioaccel2 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	int rc;

	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&(h->ioaccel2_cmd_pool_dhandle));

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
		(h->ioaccel2_blockFetchTable == NULL)) {
		rc = -ENOMEM;
		goto clean_up;
	}

	rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
	if (rc)
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}

/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
static void hpsa_free_performant_mode(struct ctlr_info *h)
{
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
}
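/*
 * Transport selection happens in hpsa_put_ctlr_into_performant_mode() below:
 * prefer ioaccel1 when the controller offers it, otherwise ioaccel2, layered
 * on performant mode; allocate the matching command pools, then the reply
 * rings and the block fetch table; the doorbell handshake itself is done by
 * hpsa_enter_performant_mode().
 */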
/*
 * Return -ENODEV on error, 0 on success (or no action).
 * Allocates numerous items that must be freed later.
 */
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
					CFGTBL_Trans_use_short_tags;
	int i, rc;

	if (hpsa_simple_mode)
		return 0;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return 0;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
		if (rc)
			return rc;
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		transMethod |= CFGTBL_Trans_io_accel2 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
		if (rc)
			return rc;
	}

	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_queue_size = h->max_commands * sizeof(u64);

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
						h->reply_queue_size,
						&(h->reply_queue[i].busaddr));
		if (!h->reply_queue[i].head) {
			rc = -ENOMEM;
			goto clean1;	/* rq, ioaccel */
		}
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1;	/* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);
	if (!h->blockFetchTable) {
		rc = -ENOMEM;
		goto clean1;	/* rq, ioaccel */
	}

	rc = hpsa_enter_performant_mode(h, trans_support);
	if (rc)
		goto clean2;	/* bft, rq, ioaccel */
	return 0;

clean2:	/* bft, rq, ioaccel */
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
clean1:	/* rq, ioaccel */
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}

static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}
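/*
 * Wait for all outstanding ioaccel commands to complete: sweep the command
 * pool, taking a temporary reference on each slot; a refcount that was
 * already elevated means the slot is in use, and in-use accelerated commands
 * are counted.  Rescan every 100 ms until none remain.
 */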
static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	int i, accel_cmds_out;
	int refcount;

	do { /* wait for all outstanding ioaccel commands to drain out */
		accel_cmds_out = 0;
		for (i = 0; i < h->nr_cmds; i++) {
			c = h->cmd_pool + i;
			refcount = atomic_inc_return(&c->refcount);
			if (refcount > 1) /* Command is allocated */
				accel_cmds_out += is_accelerated_cmd(c);
			cmd_free(h, c);
		}
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}

static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
	struct hpsa_sas_port *hpsa_sas_port)
{
	struct hpsa_sas_phy *hpsa_sas_phy;
	struct sas_phy *phy;

	hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
	if (!hpsa_sas_phy)
		return NULL;

	phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
		hpsa_sas_port->next_phy_index);
	if (!phy) {
		kfree(hpsa_sas_phy);
		return NULL;
	}

	hpsa_sas_port->next_phy_index++;
	hpsa_sas_phy->phy = phy;
	hpsa_sas_phy->parent_port = hpsa_sas_port;

	return hpsa_sas_phy;
}

static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
{
	struct sas_phy *phy = hpsa_sas_phy->phy;

	sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
	sas_phy_free(phy);
	if (hpsa_sas_phy->added_to_port)
		list_del(&hpsa_sas_phy->phy_list_entry);
	kfree(hpsa_sas_phy);
}

static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_phy *phy;
	struct sas_identify *identify;

	hpsa_sas_port = hpsa_sas_phy->parent_port;
	phy = hpsa_sas_phy->phy;

	identify = &phy->identify;
	memset(identify, 0, sizeof(*identify));
	identify->sas_address = hpsa_sas_port->sas_address;
	identify->device_type = SAS_END_DEVICE;
	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
	identify->target_port_protocols = SAS_PROTOCOL_STP;
	phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
	phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
	phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
	phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
	phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;

	rc = sas_phy_add(hpsa_sas_phy->phy);
	if (rc)
		return rc;

	sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
	list_add_tail(&hpsa_sas_phy->phy_list_entry,
			&hpsa_sas_port->phy_list_head);
	hpsa_sas_phy->added_to_port = true;

	return 0;
}

static int
hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
				struct sas_rphy *rphy)
{
	struct sas_identify *identify;

	identify = &rphy->identify;
	identify->sas_address = hpsa_sas_port->sas_address;
	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
	identify->target_port_protocols = SAS_PROTOCOL_STP;

	return sas_rphy_add(rphy);
}

static struct hpsa_sas_port
	*hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
				u64 sas_address)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_port *port;

	hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
	if (!hpsa_sas_port)
		return NULL;

	INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
	hpsa_sas_port->parent_node = hpsa_sas_node;

	port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
	if (!port)
		goto free_hpsa_port;

	rc = sas_port_add(port);
	if (rc)
		goto free_sas_port;

	hpsa_sas_port->port = port;
	hpsa_sas_port->sas_address = sas_address;
	list_add_tail(&hpsa_sas_port->port_list_entry,
			&hpsa_sas_node->port_list_head);

	return hpsa_sas_port;

free_sas_port:
	sas_port_free(port);
free_hpsa_port:
	kfree(hpsa_sas_port);

	return NULL;
}

static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
{
	struct hpsa_sas_phy *hpsa_sas_phy;
	struct hpsa_sas_phy *next;

	list_for_each_entry_safe(hpsa_sas_phy, next,
			&hpsa_sas_port->phy_list_head, phy_list_entry)
		hpsa_free_sas_phy(hpsa_sas_phy);

	sas_port_delete(hpsa_sas_port->port);
	list_del(&hpsa_sas_port->port_list_entry);
	kfree(hpsa_sas_port);
}

static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
{
	struct hpsa_sas_node *hpsa_sas_node;

	hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
	if (hpsa_sas_node) {
		hpsa_sas_node->parent_dev = parent_dev;
		INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
	}

	return hpsa_sas_node;
}

static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
{
	struct hpsa_sas_port *hpsa_sas_port;
	struct hpsa_sas_port *next;

	if (!hpsa_sas_node)
		return;

	list_for_each_entry_safe(hpsa_sas_port, next,
			&hpsa_sas_node->port_list_head, port_list_entry)
		hpsa_free_sas_port(hpsa_sas_port);

	kfree(hpsa_sas_node);
}
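/*
 * The driver exposes a minimal SAS topology to the transport class: one
 * hpsa_sas_node per controller, hpsa_sas_port objects on its port list, and
 * hpsa_sas_phy objects on each port's phy list.  The lookup below resolves
 * a transport-layer rphy back to the hpsa device that owns it.
 */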
static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
					struct sas_rphy *rphy)
{
	int i;
	struct hpsa_scsi_dev_t *device;

	for (i = 0; i < h->ndevices; i++) {
		device = h->dev[i];
		if (!device->sas_port)
			continue;
		if (device->sas_port->rphy == rphy)
			return device;
	}

	return NULL;
}

static int hpsa_add_sas_host(struct ctlr_info *h)
{
	int rc;
	struct device *parent_dev;
	struct hpsa_sas_node *hpsa_sas_node;
	struct hpsa_sas_port *hpsa_sas_port;
	struct hpsa_sas_phy *hpsa_sas_phy;

	parent_dev = &h->scsi_host->shost_gendev;

	hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
	if (!hpsa_sas_node)
		return -ENOMEM;

	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
	if (!hpsa_sas_port) {
		rc = -ENODEV;
		goto free_sas_node;
	}

	hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
	if (!hpsa_sas_phy) {
		rc = -ENODEV;
		goto free_sas_port;
	}

	rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
	if (rc)
		goto free_sas_phy;

	h->sas_host = hpsa_sas_node;

	return 0;

free_sas_phy:
	hpsa_free_sas_phy(hpsa_sas_phy);
free_sas_port:
	hpsa_free_sas_port(hpsa_sas_port);
free_sas_node:
	hpsa_free_sas_node(hpsa_sas_node);

	return rc;
}

static void hpsa_delete_sas_host(struct ctlr_info *h)
{
	hpsa_free_sas_node(h->sas_host);
}

static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
				struct hpsa_scsi_dev_t *device)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_rphy *rphy;

	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
	if (!hpsa_sas_port)
		return -ENOMEM;

	rphy = sas_end_device_alloc(hpsa_sas_port->port);
	if (!rphy) {
		rc = -ENODEV;
		goto free_sas_port;
	}

	hpsa_sas_port->rphy = rphy;
	device->sas_port = hpsa_sas_port;

	rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
	if (rc)
		goto free_sas_port;

	return 0;

free_sas_port:
	hpsa_free_sas_port(hpsa_sas_port);
	device->sas_port = NULL;

	return rc;
}

static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
{
	if (device->sas_port) {
		hpsa_free_sas_port(device->sas_port);
		device->sas_port = NULL;
	}
}

static int
hpsa_sas_get_linkerrors(struct sas_phy *phy)
{
	return 0;
}

static int
hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
	return 0;
}

static int
hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
{
	return -ENXIO;
}

static int
hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
	return 0;
}

static int
hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
{
	return 0;
}

static int
hpsa_sas_phy_setup(struct sas_phy *phy)
{
	return 0;
}

static void
hpsa_sas_phy_release(struct sas_phy *phy)
{
}

static int
hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
{
	return -EINVAL;
}

/* SMP = Serial Management Protocol */
static int
hpsa_sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
			struct request *req)
{
	return -EINVAL;
}
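/*
 * Most of these callbacks are stubs: the Smart Array firmware manages the
 * physical SAS interconnect itself, so the driver implements only what the
 * SAS transport class needs in order to register the host and its devices.
 */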
static struct sas_function_template hpsa_sas_transport_functions = {
	.get_linkerrors = hpsa_sas_get_linkerrors,
	.get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
	.get_bay_identifier = hpsa_sas_get_bay_identifier,
	.phy_reset = hpsa_sas_phy_reset,
	.phy_enable = hpsa_sas_phy_enable,
	.phy_setup = hpsa_sas_phy_setup,
	.phy_release = hpsa_sas_phy_release,
	.set_phy_speed = hpsa_sas_phy_speed,
	.smp_handler = hpsa_sas_smp_handler,
};

/*
 * This is it.  Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one of
 * our cards.
 */
static int __init hpsa_init(void)
{
	int rc;

	hpsa_sas_transport_template =
		sas_attach_transport(&hpsa_sas_transport_functions);
	if (!hpsa_sas_transport_template)
		return -ENODEV;

	rc = pci_register_driver(&hpsa_pci_driver);

	if (rc)
		sas_release_transport(hpsa_sas_transport_template);

	return rc;
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
	sas_release_transport(hpsa_sas_transport_template);
}
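/*
 * verify_offsets() is never called at runtime; it exists so the
 * BUILD_BUG_ON()s inside it break the build if any structure shared with
 * controller firmware drifts from its required byte layout.
 */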
static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)

	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48); */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3 */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4 */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);