/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
	"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
/* board_id = Subsystem Device ID & Vendor ID
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array", &SA5_access},
	{0x21BE103C, "Smart Array", &SA5_access},
	{0x21BF103C, "Smart Array", &SA5_access},
	{0x21C0103C, "Smart Array", &SA5_access},
	{0x21C1103C, "Smart Array", &SA5_access},
	{0x21C2103C, "Smart Array", &SA5_access},
	{0x21C3103C, "Smart Array", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array", &SA5_access},
	{0x21C6103C, "Smart Array", &SA5_access},
	{0x21C7103C, "Smart Array", &SA5_access},
	{0x21C8103C, "Smart Array", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array", &SA5_access},
	{0x21CB103C, "Smart Array", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart Array", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
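/*
 * Illustrative note (added): board_id packs the PCI subsystem device ID
 * into the upper 16 bits and the subsystem vendor ID into the lower 16
 * bits, so subsystem 0x103C:0x3241 above yields board_id 0x3241103C,
 * the Smart Array P212.
 */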
static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
static void lock_and_start_io(struct ctlr_info *h);
static void start_io(struct ctlr_info *h, unsigned long *flags);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
#define VPD_PAGE (1 << 8)

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			HPSA "%d: LUN failure detected\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			HPSA "%d: report LUN data changed\n", h->ctlr);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, HPSA "%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, HPSA "%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}
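/*
 * Illustrative usage (added): the attribute above is registered per SCSI
 * host via hpsa_shost_attrs[] below, so ioaccel can be toggled from
 * userspace with, e.g.:
 *
 *	echo 0 > /sys/class/scsi_host/host<N>/hp_ssd_smart_path_status
 *
 * where <N> is the controller's SCSI host number.
 */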
static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ? "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};
/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}
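/*
 * Worked example (added): is_logical_dev_addr_mode() keys off byte 3 of
 * the 8-byte CISS LUN address; an address whose byte 3 is, say, 0x40
 * satisfies (scsi3addr[3] & 0xC0) == 0x40 and is treated as a logical
 * volume, while physical and controller addresses fail the test.
 */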
static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
			host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
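/*
 * Note (added): the device attributes above are attached to each SCSI
 * device through hpsa_sdev_attrs[] below, while the host attributes are
 * attached to the Scsi_Host through hpsa_shost_attrs[] and show up under
 * /sys/class/scsi_host/host<N>/.
 */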
static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	NULL,
};

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs		= hpsa_sdev_attrs,
	.shost_attrs		= hpsa_shost_attrs,
	.max_sectors		= 8192,
	.no_write_same		= 1,
};


/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
	list_add_tail(&c->list, list);
}

static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */
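/*
 * Worked example (added, illustrative): for a normal performant-mode
 * command whose SG count selects block fetch table entry 2,
 * set_performant_mode() below computes
 *
 *	c->busaddr |= 1 | (2 << 1);
 *
 * i.e. bit 0 set (performant mode), bits 1-3 holding the fetch table
 * entry, and bits 4-6 left zero (command type 0, normal mode).
 */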
/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (likely(h->msix_vector > 0))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
	}
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
	struct CommandList *c)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
	struct CommandList *c)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
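/*
 * Note (added): since HZ is timer ticks per second, the intervals above
 * work out to one heartbeat sample every 30 seconds normally and only
 * every 240 seconds while a firmware flash is in progress.
 */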
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
	struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
	struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c);
		break;
	default:
		set_performant_mode(h, c);
	}
	dial_down_lockup_detection_during_fw_flash(h, c);
	spin_lock_irqsave(&h->lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	start_io(h, &flags);
	spin_unlock_irqrestore(&h->lock, flags);
}

static inline void removeQ(struct CommandList *c)
{
	if (WARN_ON(list_empty(&c->list)))
		return;
	list_del_init(&c->list);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}
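/*
 * Illustrative note (added): for multi-lun physical devices the firmware
 * reports 8-byte LUN addresses that differ only in byte 4 (the logical
 * unit number), so e.g. LUN 0 and LUN 2 of one device share the other
 * seven bytes; hpsa_scsi_add_entry() below relies on this to assign such
 * LUNs the same bus and target.
 */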
/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
		scsi_device_type(device->devtype), hostno,
		device->bus, device->target, device->lun);
	return 0;
}

/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed. */
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_enabled = new_entry->offload_enabled;
	h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->raid_map = new_entry->raid_map;

	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}

#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	return 0;
}
/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}

static void hpsa_monitor_offline_device(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
		return;
	}
	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}

/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}

static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */

	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;

		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
		 * as the SCSI mid-layer does not handle such devices well.
		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
		 * at 160Hz, and prevents the system from coming up.
		 */
		if (sd[i]->volume_offline) {
			hpsa_show_volume_status(h, sd[i]);
			dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
				h->scsi_host->host_no,
				sd[i]->bus, sd[i]->target, sd[i]->lun);
			continue;
		}

		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Monitor devices which are in one of several NOT READY states to be
	 * brought online later. This must be done without holding h->devlock,
	 * so don't touch h->dev[]
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		if (sd[i]->volume_offline)
			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
	}

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * future cmds to this device will get selection
			 * timeout as if the device was gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				"for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (sd != NULL)
		sdev->hostdata = sd;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}
static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
		GFP_KERNEL);
	if (!h->cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
			h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}

static int hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;
	u32 chain_len;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
	chain_len = sizeof(*chain_sg) *
		(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
	chain_sg->Len = cpu_to_le32(chain_len);
	temp64 = pci_map_single(h->pdev, chain_block, chain_len,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		chain_sg->Addr = cpu_to_le64(0);
		return -1;
	}
	chain_sg->Addr = cpu_to_le64(temp64);
	return 0;
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;

	if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
			le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
}
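/*
 * Note (added, illustrative): when a command needs more SG descriptors
 * than fit in the CommandList (Header.SGTotal > h->max_cmd_sg_entries),
 * the last embedded descriptor is flagged HPSA_SG_CHAIN and points at
 * the per-command chain block allocated above, which holds the overflow
 * descriptors; the two functions above map and unmap that block for DMA.
 */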
/* Decode the various types of errors on ioaccel2 path.
 * Return 1 for any error that should generate a RAID path retry.
 * Return 0 for errors that don't require a RAID path retry.
 */
static int handle_ioaccel_mode2_error(struct ctlr_info *h,
					struct CommandList *c,
					struct scsi_cmnd *cmd,
					struct io_accel2_cmd *c2)
{
	int data_len;
	int retry = 0;

	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_COMPLETE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
			dev_warn(&h->pdev->dev,
				"%s: task complete with check condition.\n",
				"HP SSD Smart Path");
			cmd->result |= SAM_STAT_CHECK_CONDITION;
			if (c2->error_data.data_present !=
					IOACCEL2_SENSE_DATA_PRESENT) {
				memset(cmd->sense_buffer, 0,
					SCSI_SENSE_BUFFERSIZE);
				break;
			}
			/* copy the sense data */
			data_len = c2->error_data.sense_data_len;
			if (data_len > SCSI_SENSE_BUFFERSIZE)
				data_len = SCSI_SENSE_BUFFERSIZE;
			if (data_len > sizeof(c2->error_data.sense_data_buff))
				data_len =
					sizeof(c2->error_data.sense_data_buff);
			memcpy(cmd->sense_buffer,
				c2->error_data.sense_data_buff, data_len);
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
			dev_warn(&h->pdev->dev,
				"%s: task complete with BUSY status.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
			dev_warn(&h->pdev->dev,
				"%s: task complete with reservation conflict.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
			/* Make scsi midlayer do unlimited retries */
			cmd->result = DID_IMM_RETRY << 16;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
			dev_warn(&h->pdev->dev,
				"%s: task complete with aborted status.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		default:
			dev_warn(&h->pdev->dev,
				"%s: task complete with unrecognized status: 0x%02x\n",
				"HP SSD Smart Path", c2->error_data.status);
			retry = 1;
			break;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_FAILURE:
		/* don't expect to get here. */
		dev_warn(&h->pdev->dev,
			"unexpected delivery or target failure, status = 0x%02x\n",
			c2->error_data.status);
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
		dev_warn(&h->pdev->dev, "task management function rejected.\n");
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: Unrecognized server response: 0x%02x\n",
			"HP SSD Smart Path",
			c2->error_data.serv_response);
		retry = 1;
		break;
	}

	return retry;	/* retry on raid path? */
}

static void process_ioaccel2_completion(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		struct hpsa_scsi_dev_t *dev)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	int raid_retry = 0;

	/* check for good status */
	if (likely(c2->error_data.serv_response == 0 &&
			c2->error_data.status == 0)) {
		cmd_free(h, c);
		cmd->scsi_done(cmd);
		return;
	}

	/* Any RAID offload error results in retry which will use
	 * the normal I/O path so the controller can handle whatever's
	 * wrong.
	 */
	if (is_logical_dev_addr_mode(dev->scsi3addr) &&
		c2->error_data.serv_response ==
			IOACCEL2_SERV_RESPONSE_FAILURE) {
		dev->offload_enabled = 0;
		h->drv_req_rescan = 1;	/* schedule controller for a rescan */
		cmd->result = DID_SOFT_ERROR << 16;
		cmd_free(h, c);
		cmd->scsi_done(cmd);
		return;
	}
	raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2);
	/* If error found, disable Smart Path, schedule a rescan,
	 * and force a retry on the standard path.
	 */
	if (raid_retry) {
		dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n",
			"HP SSD Smart Path");
		dev->offload_enabled = 0; /* Disable Smart Path */
		h->drv_req_rescan = 1;	/* schedule controller rescan */
		cmd->result = DID_SOFT_ERROR << 16;
	}
	cmd_free(h, c);
	cmd->scsi_done(cmd);
}

static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;
	struct hpsa_scsi_dev_t *dev;

	unsigned char sense_key;
	unsigned char asc;	/* additional sense code */
	unsigned char ascq;	/* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
	h = cp->h;
	dev = cmd->device->hostdata;

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if ((cp->cmd_type == CMD_SCSI) &&
		(le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
		hpsa_unmap_sg_chain_block(h, cp);

	cmd->result = (DID_OK << 16);		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */

	if (cp->cmd_type == CMD_IOACCEL2)
		return process_ioaccel2_completion(h, cp, cmd, dev);

	cmd->result |= ei->ScsiStatus;

	scsi_set_resid(cmd, ei->ResidualCnt);
	if (ei->CommandStatus == 0) {
		cmd_free(h, cp);
		cmd->scsi_done(cmd);
		return;
	}

	/* copy the sense data */
	if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
		sense_data_size = SCSI_SENSE_BUFFERSIZE;
	else
		sense_data_size = sizeof(ei->SenseInfo);
	if (ei->SenseLen < sense_data_size)
		sense_data_size = ei->SenseLen;

	memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);

	/* For I/O accelerator commands, copy over some fields to the normal
	 * CISS header used below for error handling.
	 */
	if (cp->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
		cp->Header.SGList = scsi_sg_count(cmd);
		cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
		cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
			IOACCEL1_IOFLAGS_CDBLEN_MASK;
		cp->Header.tag = c->tag;
		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);

		/* Any RAID offload error results in retry which will use
		 * the normal I/O path so the controller can handle whatever's
		 * wrong.
		 */
1742 */ 1743 if (is_logical_dev_addr_mode(dev->scsi3addr)) { 1744 if (ei->CommandStatus == CMD_IOACCEL_DISABLED) 1745 dev->offload_enabled = 0; 1746 cmd->result = DID_SOFT_ERROR << 16; 1747 cmd_free(h, cp); 1748 cmd->scsi_done(cmd); 1749 return; 1750 } 1751 } 1752 1753 /* an error has occurred */ 1754 switch (ei->CommandStatus) { 1755 1756 case CMD_TARGET_STATUS: 1757 if (ei->ScsiStatus) { 1758 /* Get sense key */ 1759 sense_key = 0xf & ei->SenseInfo[2]; 1760 /* Get additional sense code */ 1761 asc = ei->SenseInfo[12]; 1762 /* Get additional sense code qualifier */ 1763 ascq = ei->SenseInfo[13]; 1764 } 1765 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { 1766 if (sense_key == ABORTED_COMMAND) { 1767 cmd->result |= DID_SOFT_ERROR << 16; 1768 break; 1769 } 1770 break; 1771 } 1772 /* Problem was not a check condition. 1773 * Pass it up to the upper layers... 1774 */ 1775 if (ei->ScsiStatus) { 1776 dev_warn(&h->pdev->dev, "cp %p has status 0x%x " 1777 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " 1778 "Returning result: 0x%x\n", 1779 cp, ei->ScsiStatus, 1780 sense_key, asc, ascq, 1781 cmd->result); 1782 } else { /* scsi status is zero??? How??? */ 1783 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. " 1784 "Returning no connection.\n", cp); 1785 1786 /* Ordinarily, this case should never happen, 1787 * but there is a bug in some released firmware 1788 * revisions that allows it to happen if, for 1789 * example, a 4100 backplane loses power and 1790 * the tape drive is in it. We assume that 1791 * it's a fatal error of some kind because we 1792 * can't show that it wasn't. We will make it 1793 * look like selection timeout since that is 1794 * the most common reason for this to occur, 1795 * and it's severe enough. 1796 */ 1797 1798 cmd->result = DID_NO_CONNECT << 16; 1799 } 1800 break; 1801 1802 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ 1803 break; 1804 case CMD_DATA_OVERRUN: 1805 dev_warn(&h->pdev->dev, "cp %p has" 1806 " completed with data overrun " 1807 "reported\n", cp); 1808 break; 1809 case CMD_INVALID: { 1810 /* print_bytes(cp, sizeof(*cp), 1, 0); 1811 print_cmd(cp); */ 1812 /* We get CMD_INVALID if you address a non-existent device 1813 * instead of a selection timeout (no response). You will 1814 * see this if you yank out a drive, then try to access it. 1815 * This is kind of a shame because it means that any other 1816 * CMD_INVALID (e.g. driver bug) will get interpreted as a 1817 * missing target.
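 * Mapping it to DID_NO_CONNECT at least makes the common
 * hot-removal case behave sensibly.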
*/ 1818 cmd->result = DID_NO_CONNECT << 16; 1819 } 1820 break; 1821 case CMD_PROTOCOL_ERR: 1822 cmd->result = DID_ERROR << 16; 1823 dev_warn(&h->pdev->dev, "cp %p has " 1824 "protocol error\n", cp); 1825 break; 1826 case CMD_HARDWARE_ERR: 1827 cmd->result = DID_ERROR << 16; 1828 dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp); 1829 break; 1830 case CMD_CONNECTION_LOST: 1831 cmd->result = DID_ERROR << 16; 1832 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp); 1833 break; 1834 case CMD_ABORTED: 1835 cmd->result = DID_ABORT << 16; 1836 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n", 1837 cp, ei->ScsiStatus); 1838 break; 1839 case CMD_ABORT_FAILED: 1840 cmd->result = DID_ERROR << 16; 1841 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp); 1842 break; 1843 case CMD_UNSOLICITED_ABORT: 1844 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */ 1845 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited " 1846 "abort\n", cp); 1847 break; 1848 case CMD_TIMEOUT: 1849 cmd->result = DID_TIME_OUT << 16; 1850 dev_warn(&h->pdev->dev, "cp %p timed out\n", cp); 1851 break; 1852 case CMD_UNABORTABLE: 1853 cmd->result = DID_ERROR << 16; 1854 dev_warn(&h->pdev->dev, "Command unabortable\n"); 1855 break; 1856 case CMD_IOACCEL_DISABLED: 1857 /* This only handles the direct pass-through case since RAID 1858 * offload is handled above. Just attempt a retry. 1859 */ 1860 cmd->result = DID_SOFT_ERROR << 16; 1861 dev_warn(&h->pdev->dev, 1862 "cp %p had HP SSD Smart Path error\n", cp); 1863 break; 1864 default: 1865 cmd->result = DID_ERROR << 16; 1866 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", 1867 cp, ei->CommandStatus); 1868 } 1869 cmd_free(h, cp); 1870 cmd->scsi_done(cmd); 1871 } 1872 1873 static void hpsa_pci_unmap(struct pci_dev *pdev, 1874 struct CommandList *c, int sg_used, int data_direction) 1875 { 1876 int i; 1877 1878 for (i = 0; i < sg_used; i++) 1879 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr), 1880 le32_to_cpu(c->SG[i].Len), 1881 data_direction); 1882 } 1883 1884 static int hpsa_map_one(struct pci_dev *pdev, 1885 struct CommandList *cp, 1886 unsigned char *buf, 1887 size_t buflen, 1888 int data_direction) 1889 { 1890 u64 addr64; 1891 1892 if (buflen == 0 || data_direction == PCI_DMA_NONE) { 1893 cp->Header.SGList = 0; 1894 cp->Header.SGTotal = cpu_to_le16(0); 1895 return 0; 1896 } 1897 1898 addr64 = pci_map_single(pdev, buf, buflen, data_direction); 1899 if (dma_mapping_error(&pdev->dev, addr64)) { 1900 /* Prevent subsequent unmap of something never mapped */ 1901 cp->Header.SGList = 0; 1902 cp->Header.SGTotal = cpu_to_le16(0); 1903 return -1; 1904 } 1905 cp->SG[0].Addr = cpu_to_le64(addr64); 1906 cp->SG[0].Len = cpu_to_le32(buflen); 1907 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */ 1908 cp->Header.SGList = 1; /* no.
SGs contig in this cmd */ 1909 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */ 1910 return 0; 1911 } 1912 1913 static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, 1914 struct CommandList *c) 1915 { 1916 DECLARE_COMPLETION_ONSTACK(wait); 1917 1918 c->waiting = &wait; 1919 enqueue_cmd_and_start_io(h, c); 1920 wait_for_completion(&wait); 1921 } 1922 1923 static u32 lockup_detected(struct ctlr_info *h) 1924 { 1925 int cpu; 1926 u32 rc, *lockup_detected; 1927 1928 cpu = get_cpu(); 1929 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); 1930 rc = *lockup_detected; 1931 put_cpu(); 1932 return rc; 1933 } 1934 1935 static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h, 1936 struct CommandList *c) 1937 { 1938 /* If controller lockup detected, fake a hardware error. */ 1939 if (unlikely(lockup_detected(h))) 1940 c->err_info->CommandStatus = CMD_HARDWARE_ERR; 1941 else 1942 hpsa_scsi_do_simple_cmd_core(h, c); 1943 } 1944 1945 #define MAX_DRIVER_CMD_RETRIES 25 1946 static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, 1947 struct CommandList *c, int data_direction) 1948 { 1949 int backoff_time = 10, retry_count = 0; 1950 1951 do { 1952 memset(c->err_info, 0, sizeof(*c->err_info)); 1953 hpsa_scsi_do_simple_cmd_core(h, c); 1954 retry_count++; 1955 if (retry_count > 3) { 1956 msleep(backoff_time); 1957 if (backoff_time < 1000) 1958 backoff_time *= 2; 1959 } 1960 } while ((check_for_unit_attention(h, c) || 1961 check_for_busy(h, c)) && 1962 retry_count <= MAX_DRIVER_CMD_RETRIES); 1963 hpsa_pci_unmap(h->pdev, c, 1, data_direction); 1964 } 1965 1966 static void hpsa_print_cmd(struct ctlr_info *h, char *txt, 1967 struct CommandList *c) 1968 { 1969 const u8 *cdb = c->Request.CDB; 1970 const u8 *lun = c->Header.LUN.LunAddrBytes; 1971 1972 dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x" 1973 " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", 1974 txt, lun[0], lun[1], lun[2], lun[3], 1975 lun[4], lun[5], lun[6], lun[7], 1976 cdb[0], cdb[1], cdb[2], cdb[3], 1977 cdb[4], cdb[5], cdb[6], cdb[7], 1978 cdb[8], cdb[9], cdb[10], cdb[11], 1979 cdb[12], cdb[13], cdb[14], cdb[15]); 1980 } 1981 1982 static void hpsa_scsi_interpret_error(struct ctlr_info *h, 1983 struct CommandList *cp) 1984 { 1985 const struct ErrorInfo *ei = cp->err_info; 1986 struct device *d = &cp->h->pdev->dev; 1987 const u8 *sd = ei->SenseInfo; 1988 1989 switch (ei->CommandStatus) { 1990 case CMD_TARGET_STATUS: 1991 hpsa_print_cmd(h, "SCSI status", cp); 1992 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) 1993 dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n", 1994 sd[2] & 0x0f, sd[12], sd[13]); 1995 else 1996 dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus); 1997 if (ei->ScsiStatus == 0) 1998 dev_warn(d, "SCSI status is abnormally zero. " 1999 "(probably indicates selection timeout " 2000 "reported incorrectly due to a known " 2001 "firmware bug, circa July, 2001.)\n"); 2002 break; 2003 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ 2004 break; 2005 case CMD_DATA_OVERRUN: 2006 hpsa_print_cmd(h, "overrun condition", cp); 2007 break; 2008 case CMD_INVALID: { 2009 /* controller unfortunately reports SCSI passthru's 2010 * to non-existent targets as invalid commands. 
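 * (This mirrors the CMD_INVALID handling in complete_scsi_command()
 * above.)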
2011 */ 2012 hpsa_print_cmd(h, "invalid command", cp); 2013 dev_warn(d, "probably means device no longer present\n"); 2014 } 2015 break; 2016 case CMD_PROTOCOL_ERR: 2017 hpsa_print_cmd(h, "protocol error", cp); 2018 break; 2019 case CMD_HARDWARE_ERR: 2020 hpsa_print_cmd(h, "hardware error", cp); 2021 break; 2022 case CMD_CONNECTION_LOST: 2023 hpsa_print_cmd(h, "connection lost", cp); 2024 break; 2025 case CMD_ABORTED: 2026 hpsa_print_cmd(h, "aborted", cp); 2027 break; 2028 case CMD_ABORT_FAILED: 2029 hpsa_print_cmd(h, "abort failed", cp); 2030 break; 2031 case CMD_UNSOLICITED_ABORT: 2032 hpsa_print_cmd(h, "unsolicited abort", cp); 2033 break; 2034 case CMD_TIMEOUT: 2035 hpsa_print_cmd(h, "timed out", cp); 2036 break; 2037 case CMD_UNABORTABLE: 2038 hpsa_print_cmd(h, "unabortable", cp); 2039 break; 2040 default: 2041 hpsa_print_cmd(h, "unknown status", cp); 2042 dev_warn(d, "Unknown command status %x\n", 2043 ei->CommandStatus); 2044 } 2045 } 2046 2047 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, 2048 u16 page, unsigned char *buf, 2049 unsigned char bufsize) 2050 { 2051 int rc = IO_OK; 2052 struct CommandList *c; 2053 struct ErrorInfo *ei; 2054 2055 c = cmd_special_alloc(h); 2056 2057 if (c == NULL) { /* trouble... */ 2058 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 2059 return -ENOMEM; 2060 } 2061 2062 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, 2063 page, scsi3addr, TYPE_CMD)) { 2064 rc = -1; 2065 goto out; 2066 } 2067 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 2068 ei = c->err_info; 2069 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 2070 hpsa_scsi_interpret_error(h, c); 2071 rc = -1; 2072 } 2073 out: 2074 cmd_special_free(h, c); 2075 return rc; 2076 } 2077 2078 static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h, 2079 unsigned char *scsi3addr, unsigned char page, 2080 struct bmic_controller_parameters *buf, size_t bufsize) 2081 { 2082 int rc = IO_OK; 2083 struct CommandList *c; 2084 struct ErrorInfo *ei; 2085 2086 c = cmd_special_alloc(h); 2087 2088 if (c == NULL) { /* trouble... */ 2089 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 2090 return -ENOMEM; 2091 } 2092 2093 if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize, 2094 page, scsi3addr, TYPE_CMD)) { 2095 rc = -1; 2096 goto out; 2097 } 2098 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 2099 ei = c->err_info; 2100 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 2101 hpsa_scsi_interpret_error(h, c); 2102 rc = -1; 2103 } 2104 out: 2105 cmd_special_free(h, c); 2106 return rc; 2107 } 2108 2109 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, 2110 u8 reset_type) 2111 { 2112 int rc = IO_OK; 2113 struct CommandList *c; 2114 struct ErrorInfo *ei; 2115 2116 c = cmd_special_alloc(h); 2117 2118 if (c == NULL) { /* trouble... */ 2119 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 2120 return -ENOMEM; 2121 } 2122 2123 /* fill_cmd can't fail here, no data buffer to map. */ 2124 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, 2125 scsi3addr, TYPE_MSG); 2126 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */ 2127 hpsa_scsi_do_simple_cmd_core(h, c); 2128 /* no unmap needed here because no data xfer. 
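 * fill_cmd was given a NULL buffer above, so nothing was ever mapped.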
*/ 2129 2130 ei = c->err_info; 2131 if (ei->CommandStatus != 0) { 2132 hpsa_scsi_interpret_error(h, c); 2133 rc = -1; 2134 } 2135 cmd_special_free(h, c); 2136 return rc; 2137 } 2138 2139 static void hpsa_get_raid_level(struct ctlr_info *h, 2140 unsigned char *scsi3addr, unsigned char *raid_level) 2141 { 2142 int rc; 2143 unsigned char *buf; 2144 2145 *raid_level = RAID_UNKNOWN; 2146 buf = kzalloc(64, GFP_KERNEL); 2147 if (!buf) 2148 return; 2149 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64); 2150 if (rc == 0) 2151 *raid_level = buf[8]; 2152 if (*raid_level > RAID_UNKNOWN) 2153 *raid_level = RAID_UNKNOWN; 2154 kfree(buf); 2155 return; 2156 } 2157 2158 #define HPSA_MAP_DEBUG 2159 #ifdef HPSA_MAP_DEBUG 2160 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc, 2161 struct raid_map_data *map_buff) 2162 { 2163 struct raid_map_disk_data *dd = &map_buff->data[0]; 2164 int map, row, col; 2165 u16 map_cnt, row_cnt, disks_per_row; 2166 2167 if (rc != 0) 2168 return; 2169 2170 /* Show details only if debugging has been activated. */ 2171 if (h->raid_offload_debug < 2) 2172 return; 2173 2174 dev_info(&h->pdev->dev, "structure_size = %u\n", 2175 le32_to_cpu(map_buff->structure_size)); 2176 dev_info(&h->pdev->dev, "volume_blk_size = %u\n", 2177 le32_to_cpu(map_buff->volume_blk_size)); 2178 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n", 2179 le64_to_cpu(map_buff->volume_blk_cnt)); 2180 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n", 2181 map_buff->phys_blk_shift); 2182 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n", 2183 map_buff->parity_rotation_shift); 2184 dev_info(&h->pdev->dev, "strip_size = %u\n", 2185 le16_to_cpu(map_buff->strip_size)); 2186 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n", 2187 le64_to_cpu(map_buff->disk_starting_blk)); 2188 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n", 2189 le64_to_cpu(map_buff->disk_blk_cnt)); 2190 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n", 2191 le16_to_cpu(map_buff->data_disks_per_row)); 2192 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n", 2193 le16_to_cpu(map_buff->metadata_disks_per_row)); 2194 dev_info(&h->pdev->dev, "row_cnt = %u\n", 2195 le16_to_cpu(map_buff->row_cnt)); 2196 dev_info(&h->pdev->dev, "layout_map_count = %u\n", 2197 le16_to_cpu(map_buff->layout_map_count)); 2198 dev_info(&h->pdev->dev, "flags = 0x%x\n", 2199 le16_to_cpu(map_buff->flags)); 2200 dev_info(&h->pdev->dev, "encryption = %s\n", 2201 le16_to_cpu(map_buff->flags) & 2202 RAID_MAP_FLAG_ENCRYPT_ON ?
"ON" : "OFF"); 2203 dev_info(&h->pdev->dev, "dekindex = %u\n", 2204 le16_to_cpu(map_buff->dekindex)); 2205 map_cnt = le16_to_cpu(map_buff->layout_map_count); 2206 for (map = 0; map < map_cnt; map++) { 2207 dev_info(&h->pdev->dev, "Map%u:\n", map); 2208 row_cnt = le16_to_cpu(map_buff->row_cnt); 2209 for (row = 0; row < row_cnt; row++) { 2210 dev_info(&h->pdev->dev, " Row%u:\n", row); 2211 disks_per_row = 2212 le16_to_cpu(map_buff->data_disks_per_row); 2213 for (col = 0; col < disks_per_row; col++, dd++) 2214 dev_info(&h->pdev->dev, 2215 " D%02u: h=0x%04x xor=%u,%u\n", 2216 col, dd->ioaccel_handle, 2217 dd->xor_mult[0], dd->xor_mult[1]); 2218 disks_per_row = 2219 le16_to_cpu(map_buff->metadata_disks_per_row); 2220 for (col = 0; col < disks_per_row; col++, dd++) 2221 dev_info(&h->pdev->dev, 2222 " M%02u: h=0x%04x xor=%u,%u\n", 2223 col, dd->ioaccel_handle, 2224 dd->xor_mult[0], dd->xor_mult[1]); 2225 } 2226 } 2227 } 2228 #else 2229 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h, 2230 __attribute__((unused)) int rc, 2231 __attribute__((unused)) struct raid_map_data *map_buff) 2232 { 2233 } 2234 #endif 2235 2236 static int hpsa_get_raid_map(struct ctlr_info *h, 2237 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) 2238 { 2239 int rc = 0; 2240 struct CommandList *c; 2241 struct ErrorInfo *ei; 2242 2243 c = cmd_special_alloc(h); 2244 if (c == NULL) { 2245 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 2246 return -ENOMEM; 2247 } 2248 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, 2249 sizeof(this_device->raid_map), 0, 2250 scsi3addr, TYPE_CMD)) { 2251 dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n"); 2252 cmd_special_free(h, c); 2253 return -ENOMEM; 2254 } 2255 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 2256 ei = c->err_info; 2257 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 2258 hpsa_scsi_interpret_error(h, c); 2259 cmd_special_free(h, c); 2260 return -1; 2261 } 2262 cmd_special_free(h, c); 2263 2264 /* @todo in the future, dynamically allocate RAID map memory */ 2265 if (le32_to_cpu(this_device->raid_map.structure_size) > 2266 sizeof(this_device->raid_map)) { 2267 dev_warn(&h->pdev->dev, "RAID map size is too large!\n"); 2268 rc = -1; 2269 } 2270 hpsa_debug_map_buff(h, rc, &this_device->raid_map); 2271 return rc; 2272 } 2273 2274 static int hpsa_vpd_page_supported(struct ctlr_info *h, 2275 unsigned char scsi3addr[], u8 page) 2276 { 2277 int rc; 2278 int i; 2279 int pages; 2280 unsigned char *buf, bufsize; 2281 2282 buf = kzalloc(256, GFP_KERNEL); 2283 if (!buf) 2284 return 0; 2285 2286 /* Get the size of the page list first */ 2287 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 2288 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, 2289 buf, HPSA_VPD_HEADER_SZ); 2290 if (rc != 0) 2291 goto exit_unsupported; 2292 pages = buf[3]; 2293 if ((pages + HPSA_VPD_HEADER_SZ) <= 255) 2294 bufsize = pages + HPSA_VPD_HEADER_SZ; 2295 else 2296 bufsize = 255; 2297 2298 /* Get the whole VPD page list */ 2299 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 2300 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, 2301 buf, bufsize); 2302 if (rc != 0) 2303 goto exit_unsupported; 2304 2305 pages = buf[3]; 2306 for (i = 1; i <= pages; i++) 2307 if (buf[3 + i] == page) 2308 goto exit_supported; 2309 exit_unsupported: 2310 kfree(buf); 2311 return 0; 2312 exit_supported: 2313 kfree(buf); 2314 return 1; 2315 } 2316 2317 static void hpsa_get_ioaccel_status(struct ctlr_info *h, 2318 unsigned char *scsi3addr, struct 
hpsa_scsi_dev_t *this_device) 2319 { 2320 int rc; 2321 unsigned char *buf; 2322 u8 ioaccel_status; 2323 2324 this_device->offload_config = 0; 2325 this_device->offload_enabled = 0; 2326 2327 buf = kzalloc(64, GFP_KERNEL); 2328 if (!buf) 2329 return; 2330 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS)) 2331 goto out; 2332 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 2333 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64); 2334 if (rc != 0) 2335 goto out; 2336 2337 #define IOACCEL_STATUS_BYTE 4 2338 #define OFFLOAD_CONFIGURED_BIT 0x01 2339 #define OFFLOAD_ENABLED_BIT 0x02 2340 ioaccel_status = buf[IOACCEL_STATUS_BYTE]; 2341 this_device->offload_config = 2342 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); 2343 if (this_device->offload_config) { 2344 this_device->offload_enabled = 2345 !!(ioaccel_status & OFFLOAD_ENABLED_BIT); 2346 if (hpsa_get_raid_map(h, scsi3addr, this_device)) 2347 this_device->offload_enabled = 0; 2348 } 2349 out: 2350 kfree(buf); 2351 return; 2352 } 2353 2354 /* Get the device id from inquiry page 0x83 */ 2355 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr, 2356 unsigned char *device_id, int buflen) 2357 { 2358 int rc; 2359 unsigned char *buf; 2360 2361 if (buflen > 16) 2362 buflen = 16; 2363 buf = kzalloc(64, GFP_KERNEL); 2364 if (!buf) 2365 return -ENOMEM; 2366 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64); 2367 if (rc == 0) 2368 memcpy(device_id, &buf[8], buflen); 2369 kfree(buf); 2370 return rc != 0; 2371 } 2372 2373 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, 2374 struct ReportLUNdata *buf, int bufsize, 2375 int extended_response) 2376 { 2377 int rc = IO_OK; 2378 struct CommandList *c; 2379 unsigned char scsi3addr[8]; 2380 struct ErrorInfo *ei; 2381 2382 c = cmd_special_alloc(h); 2383 if (c == NULL) { /* trouble... */ 2384 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 2385 return -1; 2386 } 2387 /* address the controller */ 2388 memset(scsi3addr, 0, sizeof(scsi3addr)); 2389 if (fill_cmd(c, logical ? 
HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, 2390 buf, bufsize, 0, scsi3addr, TYPE_CMD)) { 2391 rc = -1; 2392 goto out; 2393 } 2394 if (extended_response) 2395 c->Request.CDB[1] = extended_response; 2396 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 2397 ei = c->err_info; 2398 if (ei->CommandStatus != 0 && 2399 ei->CommandStatus != CMD_DATA_UNDERRUN) { 2400 hpsa_scsi_interpret_error(h, c); 2401 rc = -1; 2402 } else { 2403 if (buf->extended_response_flag != extended_response) { 2404 dev_err(&h->pdev->dev, 2405 "report luns requested format %u, got %u\n", 2406 extended_response, 2407 buf->extended_response_flag); 2408 rc = -1; 2409 } 2410 } 2411 out: 2412 cmd_special_free(h, c); 2413 return rc; 2414 } 2415 2416 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, 2417 struct ReportLUNdata *buf, 2418 int bufsize, int extended_response) 2419 { 2420 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response); 2421 } 2422 2423 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h, 2424 struct ReportLUNdata *buf, int bufsize) 2425 { 2426 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0); 2427 } 2428 2429 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device, 2430 int bus, int target, int lun) 2431 { 2432 device->bus = bus; 2433 device->target = target; 2434 device->lun = lun; 2435 } 2436 2437 /* Use VPD inquiry to get details of volume status */ 2438 static int hpsa_get_volume_status(struct ctlr_info *h, 2439 unsigned char scsi3addr[]) 2440 { 2441 int rc; 2442 int status; 2443 int size; 2444 unsigned char *buf; 2445 2446 buf = kzalloc(64, GFP_KERNEL); 2447 if (!buf) 2448 return HPSA_VPD_LV_STATUS_UNSUPPORTED; 2449 2450 /* Does controller have VPD for logical volume status? */ 2451 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) 2452 goto exit_failed; 2453 2454 /* Get the size of the VPD return buffer */ 2455 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, 2456 buf, HPSA_VPD_HEADER_SZ); 2457 if (rc != 0) 2458 goto exit_failed; 2459 size = buf[3]; 2460 2461 /* Now get the whole VPD buffer */ 2462 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, 2463 buf, size + HPSA_VPD_HEADER_SZ); 2464 if (rc != 0) 2465 goto exit_failed; 2466 status = buf[4]; /* status byte */ 2467 2468 kfree(buf); 2469 return status; 2470 exit_failed: 2471 kfree(buf); 2472 return HPSA_VPD_LV_STATUS_UNSUPPORTED; 2473 } 2474 2475 /* Determine offline status of a volume. 2476 * Return either: 2477 * 0 (not offline) 2478 * 0xff (offline for unknown reasons) 2479 * # (integer code indicating one of several NOT READY states 2480 * describing why a volume is to be kept offline) 2481 */ 2482 static int hpsa_volume_offline(struct ctlr_info *h, 2483 unsigned char scsi3addr[]) 2484 { 2485 struct CommandList *c; 2486 unsigned char *sense, sense_key, asc, ascq; 2487 int ldstat = 0; 2488 u16 cmd_status; 2489 u8 scsi_status; 2490 #define ASC_LUN_NOT_READY 0x04 2491 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04 2492 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02 2493 2494 c = cmd_alloc(h); 2495 if (!c) 2496 return 0; 2497 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); 2498 hpsa_scsi_do_simple_cmd_core(h, c); 2499 sense = c->err_info->SenseInfo; 2500 sense_key = sense[2]; 2501 asc = sense[12]; 2502 ascq = sense[13]; 2503 cmd_status = c->err_info->CommandStatus; 2504 scsi_status = c->err_info->ScsiStatus; 2505 cmd_free(h, c); 2506 /* Is the volume 'not ready'? 
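 * i.e. did TEST UNIT READY fail with CHECK CONDITION, sense key
 * NOT READY and ASC 0x04 (LUN not ready)? Anything else is treated
 * as 'not offline'.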
*/ 2507 if (cmd_status != CMD_TARGET_STATUS || 2508 scsi_status != SAM_STAT_CHECK_CONDITION || 2509 sense_key != NOT_READY || 2510 asc != ASC_LUN_NOT_READY) { 2511 return 0; 2512 } 2513 2514 /* Determine the reason for not ready state */ 2515 ldstat = hpsa_get_volume_status(h, scsi3addr); 2516 2517 /* Keep volume offline in certain cases: */ 2518 switch (ldstat) { 2519 case HPSA_LV_UNDERGOING_ERASE: 2520 case HPSA_LV_UNDERGOING_RPI: 2521 case HPSA_LV_PENDING_RPI: 2522 case HPSA_LV_ENCRYPTED_NO_KEY: 2523 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: 2524 case HPSA_LV_UNDERGOING_ENCRYPTION: 2525 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING: 2526 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: 2527 return ldstat; 2528 case HPSA_VPD_LV_STATUS_UNSUPPORTED: 2529 /* If VPD status page isn't available, 2530 * use ASC/ASCQ to determine state 2531 */ 2532 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) || 2533 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ)) 2534 return ldstat; 2535 break; 2536 default: 2537 break; 2538 } 2539 return 0; 2540 } 2541 2542 static int hpsa_update_device_info(struct ctlr_info *h, 2543 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, 2544 unsigned char *is_OBDR_device) 2545 { 2546 2547 #define OBDR_SIG_OFFSET 43 2548 #define OBDR_TAPE_SIG "$DR-10" 2549 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1) 2550 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN) 2551 2552 unsigned char *inq_buff; 2553 unsigned char *obdr_sig; 2554 2555 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); 2556 if (!inq_buff) 2557 goto bail_out; 2558 2559 /* Do an inquiry to the device to see what it is. */ 2560 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, 2561 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { 2562 /* Inquiry failed (msg printed already) */ 2563 dev_err(&h->pdev->dev, 2564 "hpsa_update_device_info: inquiry failed\n"); 2565 goto bail_out; 2566 } 2567 2568 this_device->devtype = (inq_buff[0] & 0x1f); 2569 memcpy(this_device->scsi3addr, scsi3addr, 8); 2570 memcpy(this_device->vendor, &inq_buff[8], 2571 sizeof(this_device->vendor)); 2572 memcpy(this_device->model, &inq_buff[16], 2573 sizeof(this_device->model)); 2574 memset(this_device->device_id, 0, 2575 sizeof(this_device->device_id)); 2576 hpsa_get_device_id(h, scsi3addr, this_device->device_id, 2577 sizeof(this_device->device_id)); 2578 2579 if (this_device->devtype == TYPE_DISK && 2580 is_logical_dev_addr_mode(scsi3addr)) { 2581 int volume_offline; 2582 2583 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); 2584 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) 2585 hpsa_get_ioaccel_status(h, scsi3addr, this_device); 2586 volume_offline = hpsa_volume_offline(h, scsi3addr); 2587 if (volume_offline < 0 || volume_offline > 0xff) 2588 volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED; 2589 this_device->volume_offline = volume_offline & 0xff; 2590 } else { 2591 this_device->raid_level = RAID_UNKNOWN; 2592 this_device->offload_config = 0; 2593 this_device->offload_enabled = 0; 2594 this_device->volume_offline = 0; 2595 } 2596 2597 if (is_OBDR_device) { 2598 /* See if this is a One-Button-Disaster-Recovery device 2599 * by looking for "$DR-10" at offset 43 in inquiry data. 
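 * (OBDR tape drives report themselves as CD-ROM devices; the
 * signature in the vendor-specific inquiry bytes is how we tell
 * them apart from a real CD-ROM.)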
2600 */ 2601 obdr_sig = &inq_buff[OBDR_SIG_OFFSET]; 2602 *is_OBDR_device = (this_device->devtype == TYPE_ROM && 2603 strncmp(obdr_sig, OBDR_TAPE_SIG, 2604 OBDR_SIG_LEN) == 0); 2605 } 2606 2607 kfree(inq_buff); 2608 return 0; 2609 2610 bail_out: 2611 kfree(inq_buff); 2612 return 1; 2613 } 2614 2615 static unsigned char *ext_target_model[] = { 2616 "MSA2012", 2617 "MSA2024", 2618 "MSA2312", 2619 "MSA2324", 2620 "P2000 G3 SAS", 2621 "MSA 2040 SAS", 2622 NULL, 2623 }; 2624 2625 static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) 2626 { 2627 int i; 2628 2629 for (i = 0; ext_target_model[i]; i++) 2630 if (strncmp(device->model, ext_target_model[i], 2631 strlen(ext_target_model[i])) == 0) 2632 return 1; 2633 return 0; 2634 } 2635 2636 /* Helper function to assign bus, target, lun mapping of devices. 2637 * Puts non-external target logical volumes on bus 0, external target logical 2638 * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3. 2639 * Logical drive target and lun are assigned at this time, but 2640 * physical device lun and target assignment are deferred (assigned 2641 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.) 2642 */ 2643 static void figure_bus_target_lun(struct ctlr_info *h, 2644 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device) 2645 { 2646 u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes)); 2647 2648 if (!is_logical_dev_addr_mode(lunaddrbytes)) { 2649 /* physical device, target and lun filled in later */ 2650 if (is_hba_lunid(lunaddrbytes)) 2651 hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff); 2652 else 2653 /* defer target, lun assignment for physical devices */ 2654 hpsa_set_bus_target_lun(device, 2, -1, -1); 2655 return; 2656 } 2657 /* It's a logical device */ 2658 if (is_ext_target(h, device)) { 2659 /* External target: put its logicals on bus 1 and 2660 * match the target/lun numbers the box reports. 2661 * Other smart-array logicals go on bus 0, target 0, matching the lunid. 2662 */ 2663 hpsa_set_bus_target_lun(device, 2664 1, (lunid >> 16) & 0x3fff, lunid & 0x00ff); 2665 return; 2666 } 2667 hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff); 2668 } 2669 2670 /* 2671 * If there is no lun 0 on a target, linux won't find any devices. 2672 * For the external targets (arrays), we have to manually detect the enclosure 2673 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report 2674 * it for some reason. *tmpdevice is the target we're adding, 2675 * this_device is a pointer into the current element of currentsd[] 2676 * that we're building up in update_scsi_devices(), below. 2677 * lunzerobits is a bitmap that tracks which targets already have a 2678 * lun 0 assigned. 2679 * Returns 1 if an enclosure was added, 0 if not. 2680 */ 2681 static int add_ext_target_dev(struct ctlr_info *h, 2682 struct hpsa_scsi_dev_t *tmpdevice, 2683 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes, 2684 unsigned long lunzerobits[], int *n_ext_target_devs) 2685 { 2686 unsigned char scsi3addr[8]; 2687 2688 if (test_bit(tmpdevice->target, lunzerobits)) 2689 return 0; /* There is already a lun 0 on this target. */ 2690 2691 if (!is_logical_dev_addr_mode(lunaddrbytes)) 2692 return 0; /* It's the logical targets that may lack lun 0. */ 2693 2694 if (!is_ext_target(h, tmpdevice)) 2695 return 0; /* Only external target devices have this problem. */ 2696 2697 if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0.
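 * and no enclosure device needs to be faked up for this target.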
*/ 2698 return 0; 2699 2700 memset(scsi3addr, 0, 8); 2701 scsi3addr[3] = tmpdevice->target; 2702 if (is_hba_lunid(scsi3addr)) 2703 return 0; /* Don't add the RAID controller here. */ 2704 2705 if (is_scsi_rev_5(h)) 2706 return 0; /* p1210m doesn't need to do this. */ 2707 2708 if (*n_ext_target_devs >= MAX_EXT_TARGETS) { 2709 dev_warn(&h->pdev->dev, "Maximum number of external " 2710 "target devices exceeded. Check your hardware " 2711 "configuration."); 2712 return 0; 2713 } 2714 2715 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL)) 2716 return 0; 2717 (*n_ext_target_devs)++; 2718 hpsa_set_bus_target_lun(this_device, 2719 tmpdevice->bus, tmpdevice->target, 0); 2720 set_bit(tmpdevice->target, lunzerobits); 2721 return 1; 2722 } 2723 2724 /* 2725 * Get address of physical disk used for an ioaccel2 mode command: 2726 * 1. Extract ioaccel2 handle from the command. 2727 * 2. Find a matching ioaccel2 handle from list of physical disks. 2728 * 3. Return: 2729 * 1 and set scsi3addr to address of matching physical 2730 * 0 if no matching physical disk was found. 2731 */ 2732 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, 2733 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr) 2734 { 2735 struct ReportExtendedLUNdata *physicals = NULL; 2736 int responsesize = 24; /* size of physical extended response */ 2737 int extended = 2; /* flag forces reporting 'other dev info'. */ 2738 int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize; 2739 u32 nphysicals = 0; /* number of reported physical devs */ 2740 int found = 0; /* found match (1) or not (0) */ 2741 u32 find; /* handle we need to match */ 2742 int i; 2743 struct scsi_cmnd *scmd; /* scsi command within request being aborted */ 2744 struct hpsa_scsi_dev_t *d; /* device of request being aborted */ 2745 struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */ 2746 __le32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */ 2747 __le32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */ 2748 2749 if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2) 2750 return 0; /* no match */ 2751 2752 /* point to the ioaccel2 device handle */ 2753 c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex]; 2754 if (c2a == NULL) 2755 return 0; /* no match */ 2756 2757 scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd; 2758 if (scmd == NULL) 2759 return 0; /* no match */ 2760 2761 d = scmd->device->hostdata; 2762 if (d == NULL) 2763 return 0; /* no match */ 2764 2765 it_nexus = cpu_to_le32(d->ioaccel_handle); 2766 scsi_nexus = c2a->scsi_nexus; 2767 find = le32_to_cpu(c2a->scsi_nexus); 2768 2769 if (h->raid_offload_debug > 0) 2770 dev_info(&h->pdev->dev, 2771 "%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n", 2772 __func__, scsi_nexus, 2773 d->device_id[0], d->device_id[1], d->device_id[2], 2774 d->device_id[3], d->device_id[4], d->device_id[5], 2775 d->device_id[6], d->device_id[7], d->device_id[8], 2776 d->device_id[9], d->device_id[10], d->device_id[11], 2777 d->device_id[12], d->device_id[13], d->device_id[14], 2778 d->device_id[15]); 2779 2780 /* Get the list of physical devices */ 2781 physicals = kzalloc(reportsize, GFP_KERNEL); 2782 if (physicals == NULL) 2783 return 0; 2784 if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals, 2785 reportsize, extended)) { 2786 dev_err(&h->pdev->dev, 2787 "Can't lookup %s device handle: report physical LUNs failed.\n", 2788 "HP SSD Smart Path"); 2789 kfree(physicals); 
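/* no match possible without the physical LUN list */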
2790 return 0; 2791 } 2792 nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) / 2793 responsesize; 2794 2795 /* find ioaccel2 handle in list of physicals: */ 2796 for (i = 0; i < nphysicals; i++) { 2797 struct ext_report_lun_entry *entry = &physicals->LUN[i]; 2798 2799 /* handle is in bytes 28-31 of each lun */ 2800 if (entry->ioaccel_handle != find) 2801 continue; /* didn't match */ 2802 found = 1; 2803 memcpy(scsi3addr, entry->lunid, 8); 2804 if (h->raid_offload_debug > 0) 2805 dev_info(&h->pdev->dev, 2806 "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n", 2807 __func__, find, 2808 entry->ioaccel_handle, scsi3addr); 2809 break; /* found it */ 2810 } 2811 2812 kfree(physicals); 2813 if (found) 2814 return 1; 2815 else 2816 return 0; 2817 2818 } 2819 /* 2820 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev, 2821 * logdev. The number of luns in physdev and logdev are returned in 2822 * *nphysicals and *nlogicals, respectively. 2823 * Returns 0 on success, -1 otherwise. 2824 */ 2825 static int hpsa_gather_lun_info(struct ctlr_info *h, 2826 int reportphyslunsize, int reportloglunsize, 2827 struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode, 2828 struct ReportLUNdata *logdev, u32 *nlogicals) 2829 { 2830 int physical_entry_size = 8; 2831 2832 *physical_mode = 0; 2833 2834 /* For I/O accelerator mode we need to read physical device handles */ 2835 if (h->transMethod & CFGTBL_Trans_io_accel1 || 2836 h->transMethod & CFGTBL_Trans_io_accel2) { 2837 *physical_mode = HPSA_REPORT_PHYS_EXTENDED; 2838 physical_entry_size = 24; 2839 } 2840 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportphyslunsize, 2841 *physical_mode)) { 2842 dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); 2843 return -1; 2844 } 2845 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 2846 physical_entry_size; 2847 if (*nphysicals > HPSA_MAX_PHYS_LUN) { 2848 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded." 2849 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, 2850 *nphysicals - HPSA_MAX_PHYS_LUN); 2851 *nphysicals = HPSA_MAX_PHYS_LUN; 2852 } 2853 if (hpsa_scsi_do_report_log_luns(h, logdev, reportloglunsize)) { 2854 dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); 2855 return -1; 2856 } 2857 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8; 2858 /* Reject Logicals in excess of our max capability. */ 2859 if (*nlogicals > HPSA_MAX_LUN) { 2860 dev_warn(&h->pdev->dev, 2861 "maximum logical LUNs (%d) exceeded. " 2862 "%d LUNs ignored.\n", HPSA_MAX_LUN, 2863 *nlogicals - HPSA_MAX_LUN); 2864 *nlogicals = HPSA_MAX_LUN; 2865 } 2866 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) { 2867 dev_warn(&h->pdev->dev, 2868 "maximum logical + physical LUNs (%d) exceeded. 
" 2869 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, 2870 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN); 2871 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals; 2872 } 2873 return 0; 2874 } 2875 2876 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, 2877 int i, int nphysicals, int nlogicals, 2878 struct ReportExtendedLUNdata *physdev_list, 2879 struct ReportLUNdata *logdev_list) 2880 { 2881 /* Helper function, figure out where the LUN ID info is coming from 2882 * given index i, lists of physical and logical devices, where in 2883 * the list the raid controller is supposed to appear (first or last) 2884 */ 2885 2886 int logicals_start = nphysicals + (raid_ctlr_position == 0); 2887 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0); 2888 2889 if (i == raid_ctlr_position) 2890 return RAID_CTLR_LUNID; 2891 2892 if (i < logicals_start) 2893 return &physdev_list->LUN[i - 2894 (raid_ctlr_position == 0)].lunid[0]; 2895 2896 if (i < last_device) 2897 return &logdev_list->LUN[i - nphysicals - 2898 (raid_ctlr_position == 0)][0]; 2899 BUG(); 2900 return NULL; 2901 } 2902 2903 static int hpsa_hba_mode_enabled(struct ctlr_info *h) 2904 { 2905 int rc; 2906 int hba_mode_enabled; 2907 struct bmic_controller_parameters *ctlr_params; 2908 ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters), 2909 GFP_KERNEL); 2910 2911 if (!ctlr_params) 2912 return -ENOMEM; 2913 rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params, 2914 sizeof(struct bmic_controller_parameters)); 2915 if (rc) { 2916 kfree(ctlr_params); 2917 return rc; 2918 } 2919 2920 hba_mode_enabled = 2921 ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0); 2922 kfree(ctlr_params); 2923 return hba_mode_enabled; 2924 } 2925 2926 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) 2927 { 2928 /* the idea here is we could get notified 2929 * that some devices have changed, so we do a report 2930 * physical luns and report logical luns cmd, and adjust 2931 * our list of devices accordingly. 2932 * 2933 * The scsi3addr's of devices won't change so long as the 2934 * adapter is not reset. That means we can rescan and 2935 * tell which devices we already know about, vs. new 2936 * devices, vs. disappearing devices. 
2937 */ 2938 struct ReportExtendedLUNdata *physdev_list = NULL; 2939 struct ReportLUNdata *logdev_list = NULL; 2940 u32 nphysicals = 0; 2941 u32 nlogicals = 0; 2942 int physical_mode = 0; 2943 u32 ndev_allocated = 0; 2944 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; 2945 int ncurrent = 0; 2946 int i, n_ext_target_devs, ndevs_to_allocate; 2947 int raid_ctlr_position; 2948 int rescan_hba_mode; 2949 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS); 2950 2951 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); 2952 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL); 2953 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL); 2954 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); 2955 2956 if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) { 2957 dev_err(&h->pdev->dev, "out of memory\n"); 2958 goto out; 2959 } 2960 memset(lunzerobits, 0, sizeof(lunzerobits)); 2961 2962 rescan_hba_mode = hpsa_hba_mode_enabled(h); 2963 if (rescan_hba_mode < 0) 2964 goto out; 2965 2966 if (!h->hba_mode_enabled && rescan_hba_mode) 2967 dev_warn(&h->pdev->dev, "HBA mode enabled\n"); 2968 else if (h->hba_mode_enabled && !rescan_hba_mode) 2969 dev_warn(&h->pdev->dev, "HBA mode disabled\n"); 2970 2971 h->hba_mode_enabled = rescan_hba_mode; 2972 2973 if (hpsa_gather_lun_info(h, 2974 sizeof(*physdev_list), sizeof(*logdev_list), 2975 (struct ReportLUNdata *) physdev_list, &nphysicals, 2976 &physical_mode, logdev_list, &nlogicals)) 2977 goto out; 2978 2979 /* We might see up to the maximum number of logical and physical disks 2980 * plus external target devices, and a device for the local RAID 2981 * controller. 2982 */ 2983 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1; 2984 2985 /* Allocate the per device structures */ 2986 for (i = 0; i < ndevs_to_allocate; i++) { 2987 if (i >= HPSA_MAX_DEVICES) { 2988 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded." 2989 " %d devices ignored.\n", HPSA_MAX_DEVICES, 2990 ndevs_to_allocate - HPSA_MAX_DEVICES); 2991 break; 2992 } 2993 2994 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL); 2995 if (!currentsd[i]) { 2996 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n", 2997 __FILE__, __LINE__); 2998 goto out; 2999 } 3000 ndev_allocated++; 3001 } 3002 3003 if (is_scsi_rev_5(h)) 3004 raid_ctlr_position = 0; 3005 else 3006 raid_ctlr_position = nphysicals + nlogicals; 3007 3008 /* adjust our table of devices */ 3009 n_ext_target_devs = 0; 3010 for (i = 0; i < nphysicals + nlogicals + 1; i++) { 3011 u8 *lunaddrbytes, is_OBDR = 0; 3012 3013 /* Figure out where the LUN ID info is coming from */ 3014 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, 3015 i, nphysicals, nlogicals, physdev_list, logdev_list); 3016 /* skip masked physical devices. */ 3017 if (lunaddrbytes[3] & 0xC0 && 3018 i < nphysicals + (raid_ctlr_position == 0)) 3019 continue; 3020 3021 /* Get device type, vendor, model, device id */ 3022 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice, 3023 &is_OBDR)) 3024 continue; /* skip it if we can't talk to it. */ 3025 figure_bus_target_lun(h, lunaddrbytes, tmpdevice); 3026 this_device = currentsd[ncurrent]; 3027 3028 /* 3029 * For external target devices, we have to insert a LUN 0 which 3030 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there 3031 * is nonetheless an enclosure device there. We have to 3032 * present that otherwise linux won't find anything if 3033 * there is no lun 0. 
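 * add_ext_target_dev() fabricates that enclosure entry and returns
 * 1 when it has consumed a currentsd[] slot.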
3034 */ 3035 if (add_ext_target_dev(h, tmpdevice, this_device, 3036 lunaddrbytes, lunzerobits, 3037 &n_ext_target_devs)) { 3038 ncurrent++; 3039 this_device = currentsd[ncurrent]; 3040 } 3041 3042 *this_device = *tmpdevice; 3043 3044 switch (this_device->devtype) { 3045 case TYPE_ROM: 3046 /* We don't *really* support actual CD-ROM devices, 3047 * just "One Button Disaster Recovery" tape drive 3048 * which temporarily pretends to be a CD-ROM drive. 3049 * So we check that the device is really an OBDR tape 3050 * device by checking for "$DR-10" in bytes 43-48 of 3051 * the inquiry data. 3052 */ 3053 if (is_OBDR) 3054 ncurrent++; 3055 break; 3056 case TYPE_DISK: 3057 if (h->hba_mode_enabled) { 3058 /* never use raid mapper in HBA mode */ 3059 this_device->offload_enabled = 0; 3060 ncurrent++; 3061 break; 3062 } else if (h->acciopath_status) { 3063 if (i >= nphysicals) { 3064 ncurrent++; 3065 break; 3066 } 3067 } else { 3068 if (i < nphysicals) 3069 break; 3070 ncurrent++; 3071 break; 3072 } 3073 if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) { 3074 memcpy(&this_device->ioaccel_handle, 3075 &lunaddrbytes[20], 3076 sizeof(this_device->ioaccel_handle)); 3077 ncurrent++; 3078 } 3079 break; 3080 case TYPE_TAPE: 3081 case TYPE_MEDIUM_CHANGER: 3082 ncurrent++; 3083 break; 3084 case TYPE_RAID: 3085 /* Only present the Smartarray HBA as a RAID controller. 3086 * If it's a RAID controller other than the HBA itself 3087 * (an external RAID controller, MSA500 or similar) 3088 * don't present it. 3089 */ 3090 if (!is_hba_lunid(lunaddrbytes)) 3091 break; 3092 ncurrent++; 3093 break; 3094 default: 3095 break; 3096 } 3097 if (ncurrent >= HPSA_MAX_DEVICES) 3098 break; 3099 } 3100 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); 3101 out: 3102 kfree(tmpdevice); 3103 for (i = 0; i < ndev_allocated; i++) 3104 kfree(currentsd[i]); 3105 kfree(currentsd); 3106 kfree(physdev_list); 3107 kfree(logdev_list); 3108 } 3109 3110 /* 3111 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci 3112 * dma mapping and fills in the scatter gather entries of the 3113 * hpsa command, cp. 3114 */ 3115 static int hpsa_scatter_gather(struct ctlr_info *h, 3116 struct CommandList *cp, 3117 struct scsi_cmnd *cmd) 3118 { 3119 unsigned int len; 3120 struct scatterlist *sg; 3121 u64 addr64; 3122 int use_sg, i, sg_index, chained; 3123 struct SGDescriptor *curr_sg; 3124 3125 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); 3126 3127 use_sg = scsi_dma_map(cmd); 3128 if (use_sg < 0) 3129 return use_sg; 3130 3131 if (!use_sg) 3132 goto sglist_finished; 3133 3134 curr_sg = cp->SG; 3135 chained = 0; 3136 sg_index = 0; 3137 scsi_for_each_sg(cmd, sg, use_sg, i) { 3138 if (i == h->max_cmd_sg_entries - 1 && 3139 use_sg > h->max_cmd_sg_entries) { 3140 chained = 1; 3141 curr_sg = h->cmd_sg_list[cp->cmdindex]; 3142 sg_index = 0; 3143 } 3144 addr64 = (u64) sg_dma_address(sg); 3145 len = sg_dma_len(sg); 3146 curr_sg->Addr = cpu_to_le64(addr64); 3147 curr_sg->Len = cpu_to_le32(len); 3148 curr_sg->Ext = cpu_to_le32(0); 3149 curr_sg++; 3150 } 3151 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST); 3152 3153 if (use_sg + chained > h->maxSG) 3154 h->maxSG = use_sg + chained; 3155 3156 if (chained) { 3157 cp->Header.SGList = h->max_cmd_sg_entries; 3158 cp->Header.SGTotal = cpu_to_le16(use_sg + 1); 3159 if (hpsa_map_sg_chain_block(h, cp)) { 3160 scsi_dma_unmap(cmd); 3161 return -1; 3162 } 3163 return 0; 3164 } 3165 3166 sglist_finished: 3167 3168 cp->Header.SGList = (u8) use_sg; /* no. 
SGs contig in this cmd */ 3169 cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */ 3170 return 0; 3171 } 3172 3173 #define IO_ACCEL_INELIGIBLE (1) 3174 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len) 3175 { 3176 int is_write = 0; 3177 u32 block; 3178 u32 block_cnt; 3179 3180 /* Perform some CDB fixups if needed using 10 byte reads/writes only */ 3181 switch (cdb[0]) { 3182 case WRITE_6: 3183 case WRITE_12: 3184 is_write = 1; 3185 case READ_6: 3186 case READ_12: 3187 if (*cdb_len == 6) { 3188 block = (((u32) cdb[2]) << 8) | cdb[3]; 3189 block_cnt = cdb[4]; 3190 } else { 3191 BUG_ON(*cdb_len != 12); 3192 block = (((u32) cdb[2]) << 24) | 3193 (((u32) cdb[3]) << 16) | 3194 (((u32) cdb[4]) << 8) | 3195 cdb[5]; 3196 block_cnt = 3197 (((u32) cdb[6]) << 24) | 3198 (((u32) cdb[7]) << 16) | 3199 (((u32) cdb[8]) << 8) | 3200 cdb[9]; 3201 } 3202 if (block_cnt > 0xffff) 3203 return IO_ACCEL_INELIGIBLE; 3204 3205 cdb[0] = is_write ? WRITE_10 : READ_10; 3206 cdb[1] = 0; 3207 cdb[2] = (u8) (block >> 24); 3208 cdb[3] = (u8) (block >> 16); 3209 cdb[4] = (u8) (block >> 8); 3210 cdb[5] = (u8) (block); 3211 cdb[6] = 0; 3212 cdb[7] = (u8) (block_cnt >> 8); 3213 cdb[8] = (u8) (block_cnt); 3214 cdb[9] = 0; 3215 *cdb_len = 10; 3216 break; 3217 } 3218 return 0; 3219 } 3220 3221 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, 3222 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 3223 u8 *scsi3addr) 3224 { 3225 struct scsi_cmnd *cmd = c->scsi_cmd; 3226 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; 3227 unsigned int len; 3228 unsigned int total_len = 0; 3229 struct scatterlist *sg; 3230 u64 addr64; 3231 int use_sg, i; 3232 struct SGDescriptor *curr_sg; 3233 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE; 3234 3235 /* TODO: implement chaining support */ 3236 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) 3237 return IO_ACCEL_INELIGIBLE; 3238 3239 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX); 3240 3241 if (fixup_ioaccel_cdb(cdb, &cdb_len)) 3242 return IO_ACCEL_INELIGIBLE; 3243 3244 c->cmd_type = CMD_IOACCEL1; 3245 3246 /* Adjust the DMA address to point to the accelerated command buffer */ 3247 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle + 3248 (c->cmdindex * sizeof(*cp)); 3249 BUG_ON(c->busaddr & 0x0000007F); 3250 3251 use_sg = scsi_dma_map(cmd); 3252 if (use_sg < 0) 3253 return use_sg; 3254 3255 if (use_sg) { 3256 curr_sg = cp->SG; 3257 scsi_for_each_sg(cmd, sg, use_sg, i) { 3258 addr64 = (u64) sg_dma_address(sg); 3259 len = sg_dma_len(sg); 3260 total_len += len; 3261 curr_sg->Addr = cpu_to_le64(addr64); 3262 curr_sg->Len = cpu_to_le32(len); 3263 curr_sg->Ext = cpu_to_le32(0); 3264 curr_sg++; 3265 } 3266 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST); 3267 3268 switch (cmd->sc_data_direction) { 3269 case DMA_TO_DEVICE: 3270 control |= IOACCEL1_CONTROL_DATA_OUT; 3271 break; 3272 case DMA_FROM_DEVICE: 3273 control |= IOACCEL1_CONTROL_DATA_IN; 3274 break; 3275 case DMA_NONE: 3276 control |= IOACCEL1_CONTROL_NODATAXFER; 3277 break; 3278 default: 3279 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 3280 cmd->sc_data_direction); 3281 BUG(); 3282 break; 3283 } 3284 } else { 3285 control |= IOACCEL1_CONTROL_NODATAXFER; 3286 } 3287 3288 c->Header.SGList = use_sg; 3289 /* Fill out the command structure to submit */ 3290 cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF); 3291 cp->transfer_len = cpu_to_le32(total_len); 3292 cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ | 3293 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK)); 3294 cp->control = 
cpu_to_le32(control); 3295 memcpy(cp->CDB, cdb, cdb_len); 3296 memcpy(cp->CISS_LUN, scsi3addr, 8); 3297 /* Tag was already set at init time. */ 3298 enqueue_cmd_and_start_io(h, c); 3299 return 0; 3300 } 3301 3302 /* 3303 * Queue a command directly to a device behind the controller using the 3304 * I/O accelerator path. 3305 */ 3306 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h, 3307 struct CommandList *c) 3308 { 3309 struct scsi_cmnd *cmd = c->scsi_cmd; 3310 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 3311 3312 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, 3313 cmd->cmnd, cmd->cmd_len, dev->scsi3addr); 3314 } 3315 3316 /* 3317 * Set encryption parameters for the ioaccel2 request 3318 */ 3319 static void set_encrypt_ioaccel2(struct ctlr_info *h, 3320 struct CommandList *c, struct io_accel2_cmd *cp) 3321 { 3322 struct scsi_cmnd *cmd = c->scsi_cmd; 3323 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 3324 struct raid_map_data *map = &dev->raid_map; 3325 u64 first_block; 3326 3327 BUG_ON(!(dev->offload_config && dev->offload_enabled)); 3328 3329 /* Are we doing encryption on this device? */ 3330 if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON)) 3331 return; 3332 /* Set the data encryption key index. */ 3333 cp->dekindex = map->dekindex; 3334 3335 /* Set the encryption enable flag, encoded into direction field. */ 3336 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK; 3337 3338 /* Set encryption tweak values based on logical block address. 3339 * If the block size is 512, the tweak value is the LBA. 3340 * For other block sizes, the tweak is (LBA * block size) / 512. 3341 */ 3342 switch (cmd->cmnd[0]) { 3343 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */ 3344 case WRITE_6: 3345 case READ_6: 3346 first_block = get_unaligned_be16(&cmd->cmnd[2]); 3347 break; 3348 case WRITE_10: 3349 case READ_10: 3350 /* Required?
12-byte cdbs eliminated by fixup_ioaccel_cdb */ 3351 case WRITE_12: 3352 case READ_12: 3353 first_block = get_unaligned_be32(&cmd->cmnd[2]); 3354 break; 3355 case WRITE_16: 3356 case READ_16: 3357 first_block = get_unaligned_be64(&cmd->cmnd[2]); 3358 break; 3359 default: 3360 dev_err(&h->pdev->dev, 3361 "ERROR: %s: size (0x%x) not supported for encryption\n", 3362 __func__, cmd->cmnd[0]); 3363 BUG(); 3364 break; 3365 } 3366 3367 if (le32_to_cpu(map->volume_blk_size) != 512) 3368 first_block = first_block * 3369 le32_to_cpu(map->volume_blk_size)/512; 3370 3371 cp->tweak_lower = cpu_to_le32(first_block); 3372 cp->tweak_upper = cpu_to_le32(first_block >> 32); 3373 } 3374 3375 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, 3376 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 3377 u8 *scsi3addr) 3378 { 3379 struct scsi_cmnd *cmd = c->scsi_cmd; 3380 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; 3381 struct ioaccel2_sg_element *curr_sg; 3382 int use_sg, i; 3383 struct scatterlist *sg; 3384 u64 addr64; 3385 u32 len; 3386 u32 total_len = 0; 3387 3388 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) 3389 return IO_ACCEL_INELIGIBLE; 3390 3391 if (fixup_ioaccel_cdb(cdb, &cdb_len)) 3392 return IO_ACCEL_INELIGIBLE; 3393 c->cmd_type = CMD_IOACCEL2; 3394 /* Adjust the DMA address to point to the accelerated command buffer */ 3395 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + 3396 (c->cmdindex * sizeof(*cp)); 3397 BUG_ON(c->busaddr & 0x0000007F); 3398 3399 memset(cp, 0, sizeof(*cp)); 3400 cp->IU_type = IOACCEL2_IU_TYPE; 3401 3402 use_sg = scsi_dma_map(cmd); 3403 if (use_sg < 0) 3404 return use_sg; 3405 3406 if (use_sg) { 3407 BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES); 3408 curr_sg = cp->sg; 3409 scsi_for_each_sg(cmd, sg, use_sg, i) { 3410 addr64 = (u64) sg_dma_address(sg); 3411 len = sg_dma_len(sg); 3412 total_len += len; 3413 curr_sg->address = cpu_to_le64(addr64); 3414 curr_sg->length = cpu_to_le32(len); 3415 curr_sg->reserved[0] = 0; 3416 curr_sg->reserved[1] = 0; 3417 curr_sg->reserved[2] = 0; 3418 curr_sg->chain_indicator = 0; 3419 curr_sg++; 3420 } 3421 3422 switch (cmd->sc_data_direction) { 3423 case DMA_TO_DEVICE: 3424 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 3425 cp->direction |= IOACCEL2_DIR_DATA_OUT; 3426 break; 3427 case DMA_FROM_DEVICE: 3428 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 3429 cp->direction |= IOACCEL2_DIR_DATA_IN; 3430 break; 3431 case DMA_NONE: 3432 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 3433 cp->direction |= IOACCEL2_DIR_NO_DATA; 3434 break; 3435 default: 3436 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 3437 cmd->sc_data_direction); 3438 BUG(); 3439 break; 3440 } 3441 } else { 3442 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 3443 cp->direction |= IOACCEL2_DIR_NO_DATA; 3444 } 3445 3446 /* Set encryption parameters, if necessary */ 3447 set_encrypt_ioaccel2(h, c, cp); 3448 3449 cp->scsi_nexus = cpu_to_le32(ioaccel_handle); 3450 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT | 3451 DIRECT_LOOKUP_BIT); 3452 memcpy(cp->cdb, cdb, sizeof(cp->cdb)); 3453 3454 /* fill in sg elements */ 3455 cp->sg_count = (u8) use_sg; 3456 3457 cp->data_len = cpu_to_le32(total_len); 3458 cp->err_ptr = cpu_to_le64(c->busaddr + 3459 offsetof(struct io_accel2_cmd, error_data)); 3460 cp->err_len = cpu_to_le32(sizeof(cp->error_data)); 3461 3462 enqueue_cmd_and_start_io(h, c); 3463 return 0; 3464 } 3465 3466 /* 3467 * Queue a command to the correct I/O accelerator path. 
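 * transMethod records which ioaccel transport was negotiated with
 * the controller at init time; ioaccel1 is checked first if both
 * flags happen to be set.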
3468 */ 3469 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, 3470 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 3471 u8 *scsi3addr) 3472 { 3473 if (h->transMethod & CFGTBL_Trans_io_accel1) 3474 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle, 3475 cdb, cdb_len, scsi3addr); 3476 else 3477 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle, 3478 cdb, cdb_len, scsi3addr); 3479 } 3480 3481 static void raid_map_helper(struct raid_map_data *map, 3482 int offload_to_mirror, u32 *map_index, u32 *current_group) 3483 { 3484 if (offload_to_mirror == 0) { 3485 /* use physical disk in the first mirrored group. */ 3486 *map_index %= le16_to_cpu(map->data_disks_per_row); 3487 return; 3488 } 3489 do { 3490 /* determine mirror group that *map_index indicates */ 3491 *current_group = *map_index / 3492 le16_to_cpu(map->data_disks_per_row); 3493 if (offload_to_mirror == *current_group) 3494 continue; 3495 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) { 3496 /* select map index from next group */ 3497 *map_index += le16_to_cpu(map->data_disks_per_row); 3498 (*current_group)++; 3499 } else { 3500 /* select map index from first group */ 3501 *map_index %= le16_to_cpu(map->data_disks_per_row); 3502 *current_group = 0; 3503 } 3504 } while (offload_to_mirror != *current_group); 3505 } 3506 3507 /* 3508 * Attempt to perform offload RAID mapping for a logical volume I/O. 3509 */ 3510 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, 3511 struct CommandList *c) 3512 { 3513 struct scsi_cmnd *cmd = c->scsi_cmd; 3514 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 3515 struct raid_map_data *map = &dev->raid_map; 3516 struct raid_map_disk_data *dd = &map->data[0]; 3517 int is_write = 0; 3518 u32 map_index; 3519 u64 first_block, last_block; 3520 u32 block_cnt; 3521 u32 blocks_per_row; 3522 u64 first_row, last_row; 3523 u32 first_row_offset, last_row_offset; 3524 u32 first_column, last_column; 3525 u64 r0_first_row, r0_last_row; 3526 u32 r5or6_blocks_per_row; 3527 u64 r5or6_first_row, r5or6_last_row; 3528 u32 r5or6_first_row_offset, r5or6_last_row_offset; 3529 u32 r5or6_first_column, r5or6_last_column; 3530 u32 total_disks_per_row; 3531 u32 stripesize; 3532 u32 first_group, last_group, current_group; 3533 u32 map_row; 3534 u32 disk_handle; 3535 u64 disk_block; 3536 u32 disk_block_cnt; 3537 u8 cdb[16]; 3538 u8 cdb_len; 3539 u16 strip_size; 3540 #if BITS_PER_LONG == 32 3541 u64 tmpdiv; 3542 #endif 3543 int offload_to_mirror; 3544 3545 BUG_ON(!(dev->offload_config && dev->offload_enabled)); 3546 3547 /* check for valid opcode, get LBA and block count */ 3548 switch (cmd->cmnd[0]) { 3549 case WRITE_6: 3550 is_write = 1; 3551 case READ_6: 3552 first_block = 3553 (((u64) cmd->cmnd[2]) << 8) | 3554 cmd->cmnd[3]; 3555 block_cnt = cmd->cmnd[4]; 3556 if (block_cnt == 0) 3557 block_cnt = 256; 3558 break; 3559 case WRITE_10: 3560 is_write = 1; 3561 case READ_10: 3562 first_block = 3563 (((u64) cmd->cmnd[2]) << 24) | 3564 (((u64) cmd->cmnd[3]) << 16) | 3565 (((u64) cmd->cmnd[4]) << 8) | 3566 cmd->cmnd[5]; 3567 block_cnt = 3568 (((u32) cmd->cmnd[7]) << 8) | 3569 cmd->cmnd[8]; 3570 break; 3571 case WRITE_12: 3572 is_write = 1; 3573 case READ_12: 3574 first_block = 3575 (((u64) cmd->cmnd[2]) << 24) | 3576 (((u64) cmd->cmnd[3]) << 16) | 3577 (((u64) cmd->cmnd[4]) << 8) | 3578 cmd->cmnd[5]; 3579 block_cnt = 3580 (((u32) cmd->cmnd[6]) << 24) | 3581 (((u32) cmd->cmnd[7]) << 16) | 3582 (((u32) cmd->cmnd[8]) << 8) | 3583 cmd->cmnd[9]; 3584 break; 3585 case 
WRITE_16: 3586 is_write = 1; 3587 case READ_16: 3588 first_block = 3589 (((u64) cmd->cmnd[2]) << 56) | 3590 (((u64) cmd->cmnd[3]) << 48) | 3591 (((u64) cmd->cmnd[4]) << 40) | 3592 (((u64) cmd->cmnd[5]) << 32) | 3593 (((u64) cmd->cmnd[6]) << 24) | 3594 (((u64) cmd->cmnd[7]) << 16) | 3595 (((u64) cmd->cmnd[8]) << 8) | 3596 cmd->cmnd[9]; 3597 block_cnt = 3598 (((u32) cmd->cmnd[10]) << 24) | 3599 (((u32) cmd->cmnd[11]) << 16) | 3600 (((u32) cmd->cmnd[12]) << 8) | 3601 cmd->cmnd[13]; 3602 break; 3603 default: 3604 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */ 3605 } 3606 last_block = first_block + block_cnt - 1; 3607 3608 /* check for write to non-RAID-0 */ 3609 if (is_write && dev->raid_level != 0) 3610 return IO_ACCEL_INELIGIBLE; 3611 3612 /* check for invalid block or wraparound */ 3613 if (last_block >= le64_to_cpu(map->volume_blk_cnt) || 3614 last_block < first_block) 3615 return IO_ACCEL_INELIGIBLE; 3616 3617 /* calculate stripe information for the request */ 3618 blocks_per_row = le16_to_cpu(map->data_disks_per_row) * 3619 le16_to_cpu(map->strip_size); 3620 strip_size = le16_to_cpu(map->strip_size); 3621 #if BITS_PER_LONG == 32 3622 tmpdiv = first_block; 3623 (void) do_div(tmpdiv, blocks_per_row); 3624 first_row = tmpdiv; 3625 tmpdiv = last_block; 3626 (void) do_div(tmpdiv, blocks_per_row); 3627 last_row = tmpdiv; 3628 first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); 3629 last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); 3630 tmpdiv = first_row_offset; 3631 (void) do_div(tmpdiv, strip_size); 3632 first_column = tmpdiv; 3633 tmpdiv = last_row_offset; 3634 (void) do_div(tmpdiv, strip_size); 3635 last_column = tmpdiv; 3636 #else 3637 first_row = first_block / blocks_per_row; 3638 last_row = last_block / blocks_per_row; 3639 first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); 3640 last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); 3641 first_column = first_row_offset / strip_size; 3642 last_column = last_row_offset / strip_size; 3643 #endif 3644 3645 /* if this isn't a single row/column then give to the controller */ 3646 if ((first_row != last_row) || (first_column != last_column)) 3647 return IO_ACCEL_INELIGIBLE; 3648 3649 /* proceeding with driver mapping */ 3650 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) + 3651 le16_to_cpu(map->metadata_disks_per_row); 3652 map_row = ((u32)(first_row >> map->parity_rotation_shift)) % 3653 le16_to_cpu(map->row_cnt); 3654 map_index = (map_row * total_disks_per_row) + first_column; 3655 3656 switch (dev->raid_level) { 3657 case HPSA_RAID_0: 3658 break; /* nothing special to do */ 3659 case HPSA_RAID_1: 3660 /* Handles load balance across RAID 1 members. 3661 * (2-drive R1 and R10 with even # of drives.) 3662 * Appropriate for SSDs, not optimal for HDDs 3663 */ 3664 BUG_ON(le16_to_cpu(map->layout_map_count) != 2); 3665 if (dev->offload_to_mirror) 3666 map_index += le16_to_cpu(map->data_disks_per_row); 3667 dev->offload_to_mirror = !dev->offload_to_mirror; 3668 break; 3669 case HPSA_RAID_ADM: 3670 /* Handles N-way mirrors (R1-ADM) 3671 * and R10 with # of drives divisible by 3. 3672 */ 3673 BUG_ON(le16_to_cpu(map->layout_map_count) != 3); 3674 3675 offload_to_mirror = dev->offload_to_mirror; 3676 raid_map_helper(map, offload_to_mirror, 3677 &map_index, &current_group); 3678 /* set mirror group to use next time */ 3679 offload_to_mirror = 3680 (offload_to_mirror >= 3681 le16_to_cpu(map->layout_map_count) - 1) 3682 ?
0 : offload_to_mirror + 1; 3683 dev->offload_to_mirror = offload_to_mirror; 3684 /* Avoid direct use of dev->offload_to_mirror within this 3685 * function since multiple threads might simultaneously 3686 * increment it beyond the range of dev->layout_map_count -1. 3687 */ 3688 break; 3689 case HPSA_RAID_5: 3690 case HPSA_RAID_6: 3691 if (le16_to_cpu(map->layout_map_count) <= 1) 3692 break; 3693 3694 /* Verify first and last block are in same RAID group */ 3695 r5or6_blocks_per_row = 3696 le16_to_cpu(map->strip_size) * 3697 le16_to_cpu(map->data_disks_per_row); 3698 BUG_ON(r5or6_blocks_per_row == 0); 3699 stripesize = r5or6_blocks_per_row * 3700 le16_to_cpu(map->layout_map_count); 3701 #if BITS_PER_LONG == 32 3702 tmpdiv = first_block; 3703 first_group = do_div(tmpdiv, stripesize); 3704 tmpdiv = first_group; 3705 (void) do_div(tmpdiv, r5or6_blocks_per_row); 3706 first_group = tmpdiv; 3707 tmpdiv = last_block; 3708 last_group = do_div(tmpdiv, stripesize); 3709 tmpdiv = last_group; 3710 (void) do_div(tmpdiv, r5or6_blocks_per_row); 3711 last_group = tmpdiv; 3712 #else 3713 first_group = (first_block % stripesize) / r5or6_blocks_per_row; 3714 last_group = (last_block % stripesize) / r5or6_blocks_per_row; 3715 #endif 3716 if (first_group != last_group) 3717 return IO_ACCEL_INELIGIBLE; 3718 3719 /* Verify request is in a single row of RAID 5/6 */ 3720 #if BITS_PER_LONG == 32 3721 tmpdiv = first_block; 3722 (void) do_div(tmpdiv, stripesize); 3723 first_row = r5or6_first_row = r0_first_row = tmpdiv; 3724 tmpdiv = last_block; 3725 (void) do_div(tmpdiv, stripesize); 3726 r5or6_last_row = r0_last_row = tmpdiv; 3727 #else 3728 first_row = r5or6_first_row = r0_first_row = 3729 first_block / stripesize; 3730 r5or6_last_row = r0_last_row = last_block / stripesize; 3731 #endif 3732 if (r5or6_first_row != r5or6_last_row) 3733 return IO_ACCEL_INELIGIBLE; 3734 3735 3736 /* Verify request is in a single column */ 3737 #if BITS_PER_LONG == 32 3738 tmpdiv = first_block; 3739 first_row_offset = do_div(tmpdiv, stripesize); 3740 tmpdiv = first_row_offset; 3741 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row); 3742 r5or6_first_row_offset = first_row_offset; 3743 tmpdiv = last_block; 3744 r5or6_last_row_offset = do_div(tmpdiv, stripesize); 3745 tmpdiv = r5or6_last_row_offset; 3746 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); 3747 tmpdiv = r5or6_first_row_offset; 3748 (void) do_div(tmpdiv, map->strip_size); 3749 first_column = r5or6_first_column = tmpdiv; 3750 tmpdiv = r5or6_last_row_offset; 3751 (void) do_div(tmpdiv, map->strip_size); 3752 r5or6_last_column = tmpdiv; 3753 #else 3754 first_row_offset = r5or6_first_row_offset = 3755 (u32)((first_block % stripesize) % 3756 r5or6_blocks_per_row); 3757 3758 r5or6_last_row_offset = 3759 (u32)((last_block % stripesize) % 3760 r5or6_blocks_per_row); 3761 3762 first_column = r5or6_first_column = 3763 r5or6_first_row_offset / le16_to_cpu(map->strip_size); 3764 r5or6_last_column = 3765 r5or6_last_row_offset / le16_to_cpu(map->strip_size); 3766 #endif 3767 if (r5or6_first_column != r5or6_last_column) 3768 return IO_ACCEL_INELIGIBLE; 3769 3770 /* Request is eligible */ 3771 map_row = ((u32)(first_row >> map->parity_rotation_shift)) % 3772 le16_to_cpu(map->row_cnt); 3773 3774 map_index = (first_group * 3775 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) + 3776 (map_row * total_disks_per_row) + first_column; 3777 break; 3778 default: 3779 return IO_ACCEL_INELIGIBLE; 3780 } 3781 3782 disk_handle = dd[map_index].ioaccel_handle; 3783 disk_block = 
le64_to_cpu(map->disk_starting_blk) + 3784 first_row * le16_to_cpu(map->strip_size) + 3785 (first_row_offset - first_column * 3786 le16_to_cpu(map->strip_size)); 3787 disk_block_cnt = block_cnt; 3788 3789 /* handle differing logical/physical block sizes */ 3790 if (map->phys_blk_shift) { 3791 disk_block <<= map->phys_blk_shift; 3792 disk_block_cnt <<= map->phys_blk_shift; 3793 } 3794 BUG_ON(disk_block_cnt > 0xffff); 3795 3796 /* build the new CDB for the physical disk I/O */ 3797 if (disk_block > 0xffffffff) { 3798 cdb[0] = is_write ? WRITE_16 : READ_16; 3799 cdb[1] = 0; 3800 cdb[2] = (u8) (disk_block >> 56); 3801 cdb[3] = (u8) (disk_block >> 48); 3802 cdb[4] = (u8) (disk_block >> 40); 3803 cdb[5] = (u8) (disk_block >> 32); 3804 cdb[6] = (u8) (disk_block >> 24); 3805 cdb[7] = (u8) (disk_block >> 16); 3806 cdb[8] = (u8) (disk_block >> 8); 3807 cdb[9] = (u8) (disk_block); 3808 cdb[10] = (u8) (disk_block_cnt >> 24); 3809 cdb[11] = (u8) (disk_block_cnt >> 16); 3810 cdb[12] = (u8) (disk_block_cnt >> 8); 3811 cdb[13] = (u8) (disk_block_cnt); 3812 cdb[14] = 0; 3813 cdb[15] = 0; 3814 cdb_len = 16; 3815 } else { 3816 cdb[0] = is_write ? WRITE_10 : READ_10; 3817 cdb[1] = 0; 3818 cdb[2] = (u8) (disk_block >> 24); 3819 cdb[3] = (u8) (disk_block >> 16); 3820 cdb[4] = (u8) (disk_block >> 8); 3821 cdb[5] = (u8) (disk_block); 3822 cdb[6] = 0; 3823 cdb[7] = (u8) (disk_block_cnt >> 8); 3824 cdb[8] = (u8) (disk_block_cnt); 3825 cdb[9] = 0; 3826 cdb_len = 10; 3827 } 3828 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len, 3829 dev->scsi3addr); 3830 } 3831 3832 /* 3833 * Running in struct Scsi_Host->host_lock less mode using LLD internal 3834 * struct ctlr_info *h->lock w/ spin_lock_irqsave() protection. 3835 */ 3836 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) 3837 { 3838 struct ctlr_info *h; 3839 struct hpsa_scsi_dev_t *dev; 3840 unsigned char scsi3addr[8]; 3841 struct CommandList *c; 3842 int rc = 0; 3843 3844 /* Get the ptr to our adapter structure out of cmd->host. */ 3845 h = sdev_to_hba(cmd->device); 3846 dev = cmd->device->hostdata; 3847 if (!dev) { 3848 cmd->result = DID_NO_CONNECT << 16; 3849 cmd->scsi_done(cmd); 3850 return 0; 3851 } 3852 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); 3853 3854 if (unlikely(lockup_detected(h))) { 3855 cmd->result = DID_ERROR << 16; 3856 cmd->scsi_done(cmd); 3857 return 0; 3858 } 3859 c = cmd_alloc(h); 3860 if (c == NULL) { /* trouble... */ 3861 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); 3862 return SCSI_MLQUEUE_HOST_BUSY; 3863 } 3864 3865 /* Fill in the command list header */ 3866 /* save c in case we have to abort it */ 3867 cmd->host_scribble = (unsigned char *) c; 3868 3869 c->cmd_type = CMD_SCSI; 3870 c->scsi_cmd = cmd; 3871 3872 /* Call alternate submit routine for I/O accelerated commands. 3873 * Retries always go down the normal I/O path. 3874 */ 3875 if (likely(cmd->retries == 0 && 3876 cmd->request->cmd_type == REQ_TYPE_FS && 3877 h->acciopath_status)) { 3878 if (dev->offload_enabled) { 3879 rc = hpsa_scsi_ioaccel_raid_map(h, c); 3880 if (rc == 0) 3881 return 0; /* Sent on ioaccel path */ 3882 if (rc < 0) { /* scsi_dma_map failed. */ 3883 cmd_free(h, c); 3884 return SCSI_MLQUEUE_HOST_BUSY; 3885 } 3886 } else if (dev->ioaccel_handle) { 3887 rc = hpsa_scsi_ioaccel_direct_map(h, c); 3888 if (rc == 0) 3889 return 0; /* Sent on direct map path */ 3890 if (rc < 0) { /* scsi_dma_map failed. 
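* Return-code convention, for reference: 0 means the command was sent on the accelerated path; a negative value means scsi_dma_map() failed, so we free the command and return SCSI_MLQUEUE_HOST_BUSY to make the midlayer retry; a positive IO_ACCEL_INELIGIBLE return falls through to the standard RAID path below.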
*/ 3891 cmd_free(h, c); 3892 return SCSI_MLQUEUE_HOST_BUSY; 3893 } 3894 } 3895 } 3896 3897 c->Header.ReplyQueue = 0; /* unused in simple mode */ 3898 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); 3899 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT) | 3900 DIRECT_LOOKUP_BIT); 3901 3902 /* Fill in the request block... */ 3903 3904 c->Request.Timeout = 0; 3905 memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); 3906 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); 3907 c->Request.CDBLen = cmd->cmd_len; 3908 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); 3909 switch (cmd->sc_data_direction) { 3910 case DMA_TO_DEVICE: 3911 c->Request.type_attr_dir = 3912 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE); 3913 break; 3914 case DMA_FROM_DEVICE: 3915 c->Request.type_attr_dir = 3916 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ); 3917 break; 3918 case DMA_NONE: 3919 c->Request.type_attr_dir = 3920 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE); 3921 break; 3922 case DMA_BIDIRECTIONAL: 3923 /* This can happen if a buggy application does a scsi passthru 3924 * and sets both inlen and outlen to non-zero. ( see 3925 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) 3926 */ 3927 3928 c->Request.type_attr_dir = 3929 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD); 3930 /* This is technically wrong, and hpsa controllers should 3931 * reject it with CMD_INVALID, which is the most correct 3932 * response, but non-fibre backends appear to let it 3933 * slide by, and give the same results as if this field 3934 * were set correctly. Either way is acceptable for 3935 * our purposes here. 3936 */ 3937 3938 break; 3939 3940 default: 3941 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 3942 cmd->sc_data_direction); 3943 BUG(); 3944 break; 3945 } 3946 3947 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ 3948 cmd_free(h, c); 3949 return SCSI_MLQUEUE_HOST_BUSY; 3950 } 3951 enqueue_cmd_and_start_io(h, c); 3952 /* the cmd'll come back via intr handler in complete_scsi_command() */ 3953 return 0; 3954 } 3955 3956 static int do_not_scan_if_controller_locked_up(struct ctlr_info *h) 3957 { 3958 unsigned long flags; 3959 3960 /* 3961 * Don't let rescans be initiated on a controller known 3962 * to be locked up. If the controller locks up *during* 3963 * a rescan, that thread is probably hosed, but at least 3964 * we can prevent new rescan threads from piling up on a 3965 * locked up controller. 3966 */ 3967 if (unlikely(lockup_detected(h))) { 3968 spin_lock_irqsave(&h->scan_lock, flags); 3969 h->scan_finished = 1; 3970 wake_up_all(&h->scan_wait_queue); 3971 spin_unlock_irqrestore(&h->scan_lock, flags); 3972 return 1; 3973 } 3974 return 0; 3975 } 3976 3977 static void hpsa_scan_start(struct Scsi_Host *sh) 3978 { 3979 struct ctlr_info *h = shost_to_hba(sh); 3980 unsigned long flags; 3981 3982 if (do_not_scan_if_controller_locked_up(h)) 3983 return; 3984 3985 /* wait until any scan already in progress is finished. */ 3986 while (1) { 3987 spin_lock_irqsave(&h->scan_lock, flags); 3988 if (h->scan_finished) 3989 break; 3990 spin_unlock_irqrestore(&h->scan_lock, flags); 3991 wait_event(h->scan_wait_queue, h->scan_finished); 3992 /* Note: We don't need to worry about a race between this 3993 * thread and driver unload because the midlayer will 3994 * have incremented the reference count, so unload won't 3995 * happen if we're in here. 
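* (Hence the loop above also re-checks h->scan_finished under scan_lock after every wakeup, since wait_event() can return while another thread is already starting the next scan.)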
3996 */ 3997 } 3998 h->scan_finished = 0; /* mark scan as in progress */ 3999 spin_unlock_irqrestore(&h->scan_lock, flags); 4000 4001 if (do_not_scan_if_controller_locked_up(h)) 4002 return; 4003 4004 hpsa_update_scsi_devices(h, h->scsi_host->host_no); 4005 4006 spin_lock_irqsave(&h->scan_lock, flags); 4007 h->scan_finished = 1; /* mark scan as finished. */ 4008 wake_up_all(&h->scan_wait_queue); 4009 spin_unlock_irqrestore(&h->scan_lock, flags); 4010 } 4011 4012 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth) 4013 { 4014 struct ctlr_info *h = sdev_to_hba(sdev); 4015 4016 if (qdepth < 1) 4017 qdepth = 1; 4018 else 4019 if (qdepth > h->nr_cmds) 4020 qdepth = h->nr_cmds; 4021 scsi_change_queue_depth(sdev, qdepth); 4022 return sdev->queue_depth; 4023 } 4024 4025 static int hpsa_scan_finished(struct Scsi_Host *sh, 4026 unsigned long elapsed_time) 4027 { 4028 struct ctlr_info *h = shost_to_hba(sh); 4029 unsigned long flags; 4030 int finished; 4031 4032 spin_lock_irqsave(&h->scan_lock, flags); 4033 finished = h->scan_finished; 4034 spin_unlock_irqrestore(&h->scan_lock, flags); 4035 return finished; 4036 } 4037 4038 static void hpsa_unregister_scsi(struct ctlr_info *h) 4039 { 4040 /* we are being forcibly unloaded, and may not refuse. */ 4041 scsi_remove_host(h->scsi_host); 4042 scsi_host_put(h->scsi_host); 4043 h->scsi_host = NULL; 4044 } 4045 4046 static int hpsa_register_scsi(struct ctlr_info *h) 4047 { 4048 struct Scsi_Host *sh; 4049 int error; 4050 4051 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); 4052 if (sh == NULL) 4053 goto fail; 4054 4055 sh->io_port = 0; 4056 sh->n_io_port = 0; 4057 sh->this_id = -1; 4058 sh->max_channel = 3; 4059 sh->max_cmd_len = MAX_COMMAND_SIZE; 4060 sh->max_lun = HPSA_MAX_LUN; 4061 sh->max_id = HPSA_MAX_LUN; 4062 sh->can_queue = h->nr_cmds; 4063 if (h->hba_mode_enabled) 4064 sh->cmd_per_lun = 7; 4065 else 4066 sh->cmd_per_lun = h->nr_cmds; 4067 sh->sg_tablesize = h->maxsgentries; 4068 h->scsi_host = sh; 4069 sh->hostdata[0] = (unsigned long) h; 4070 sh->irq = h->intr[h->intr_mode]; 4071 sh->unique_id = sh->irq; 4072 error = scsi_add_host(sh, &h->pdev->dev); 4073 if (error) 4074 goto fail_host_put; 4075 scsi_scan_host(sh); 4076 return 0; 4077 4078 fail_host_put: 4079 dev_err(&h->pdev->dev, "%s: scsi_add_host" 4080 " failed for controller %d\n", __func__, h->ctlr); 4081 scsi_host_put(sh); 4082 return error; 4083 fail: 4084 dev_err(&h->pdev->dev, "%s: scsi_host_alloc" 4085 " failed for controller %d\n", __func__, h->ctlr); 4086 return -ENOMEM; 4087 } 4088 4089 static int wait_for_device_to_become_ready(struct ctlr_info *h, 4090 unsigned char lunaddr[]) 4091 { 4092 int rc; 4093 int count = 0; 4094 int waittime = 1; /* seconds */ 4095 struct CommandList *c; 4096 4097 c = cmd_special_alloc(h); 4098 if (!c) { 4099 dev_warn(&h->pdev->dev, "out of memory in " 4100 "wait_for_device_to_become_ready.\n"); 4101 return IO_ERROR; 4102 } 4103 4104 /* Send test unit ready until device ready, or give up. */ 4105 while (count < HPSA_TUR_RETRY_LIMIT) { 4106 4107 /* Wait for a bit. do this first, because if we send 4108 * the TUR right away, the reset will just abort it. 4109 */ 4110 msleep(1000 * waittime); 4111 count++; 4112 rc = 0; /* Device ready. */ 4113 4114 /* Increase wait time with each try, up to a point. 
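* (Backoff sketch: the one-second initial sleep doubles on each retry while still below HPSA_MAX_WAIT_INTERVAL_SECS, e.g. 1, 2, 4, 8, ... seconds, across at most HPSA_TUR_RETRY_LIMIT attempts.)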
*/ 4115 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) 4116 waittime = waittime * 2; 4117 4118 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ 4119 (void) fill_cmd(c, TEST_UNIT_READY, h, 4120 NULL, 0, 0, lunaddr, TYPE_CMD); 4121 hpsa_scsi_do_simple_cmd_core(h, c); 4122 /* no unmap needed here because no data xfer. */ 4123 4124 if (c->err_info->CommandStatus == CMD_SUCCESS) 4125 break; 4126 4127 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 4128 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && 4129 (c->err_info->SenseInfo[2] == NO_SENSE || 4130 c->err_info->SenseInfo[2] == UNIT_ATTENTION)) 4131 break; 4132 4133 dev_warn(&h->pdev->dev, "waiting %d secs " 4134 "for device to become ready.\n", waittime); 4135 rc = 1; /* device not ready. */ 4136 } 4137 4138 if (rc) 4139 dev_warn(&h->pdev->dev, "giving up on device.\n"); 4140 else 4141 dev_warn(&h->pdev->dev, "device is ready.\n"); 4142 4143 cmd_special_free(h, c); 4144 return rc; 4145 } 4146 4147 /* Need at least one of these error handlers to keep ../scsi/hosts.c from 4148 * complaining. Doing a host- or bus-reset can't do anything good here. 4149 */ 4150 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) 4151 { 4152 int rc; 4153 struct ctlr_info *h; 4154 struct hpsa_scsi_dev_t *dev; 4155 4156 /* find the controller to which the command to be aborted was sent */ 4157 h = sdev_to_hba(scsicmd->device); 4158 if (h == NULL) /* paranoia */ 4159 return FAILED; 4160 dev = scsicmd->device->hostdata; 4161 if (!dev) { 4162 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " 4163 "device lookup failed.\n"); 4164 return FAILED; 4165 } 4166 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n", 4167 h->scsi_host->host_no, dev->bus, dev->target, dev->lun); 4168 /* send a reset to the SCSI LUN which the command was sent to */ 4169 rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN); 4170 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) 4171 return SUCCESS; 4172 4173 dev_warn(&h->pdev->dev, "resetting device failed.\n"); 4174 return FAILED; 4175 } 4176 4177 static void swizzle_abort_tag(u8 *tag) 4178 { 4179 u8 original_tag[8]; 4180 4181 memcpy(original_tag, tag, 8); 4182 tag[0] = original_tag[3]; 4183 tag[1] = original_tag[2]; 4184 tag[2] = original_tag[1]; 4185 tag[3] = original_tag[0]; 4186 tag[4] = original_tag[7]; 4187 tag[5] = original_tag[6]; 4188 tag[6] = original_tag[5]; 4189 tag[7] = original_tag[4]; 4190 } 4191 4192 static void hpsa_get_tag(struct ctlr_info *h, 4193 struct CommandList *c, __le32 *taglower, __le32 *tagupper) 4194 { 4195 u64 tag; 4196 if (c->cmd_type == CMD_IOACCEL1) { 4197 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *) 4198 &h->ioaccel_cmd_pool[c->cmdindex]; 4199 tag = le64_to_cpu(cm1->tag); 4200 *tagupper = cpu_to_le32(tag >> 32); 4201 *taglower = cpu_to_le32(tag); 4202 return; 4203 } 4204 if (c->cmd_type == CMD_IOACCEL2) { 4205 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *) 4206 &h->ioaccel2_cmd_pool[c->cmdindex]; 4207 /* upper tag not used in ioaccel2 mode */ 4208 memset(tagupper, 0, sizeof(*tagupper)); 4209 *taglower = cm2->Tag; 4210 return; 4211 } 4212 tag = le64_to_cpu(c->Header.tag); 4213 *tagupper = cpu_to_le32(tag >> 32); 4214 *taglower = cpu_to_le32(tag); 4215 } 4216 4217 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, 4218 struct CommandList *abort, int swizzle) 4219 { 4220 int rc = IO_OK; 4221 struct CommandList *c; 4222 struct ErrorInfo *ei; 4223 __le32 tagupper, taglower; 4224 4225 c = 
cmd_special_alloc(h); 4226 if (c == NULL) { /* trouble... */ 4227 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 4228 return -ENOMEM; 4229 } 4230 4231 /* fill_cmd can't fail here, no buffer to map */ 4232 (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort, 4233 0, 0, scsi3addr, TYPE_MSG); 4234 if (swizzle) 4235 swizzle_abort_tag(&c->Request.CDB[4]); 4236 hpsa_scsi_do_simple_cmd_core(h, c); 4237 hpsa_get_tag(h, abort, &taglower, &tagupper); 4238 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n", 4239 __func__, tagupper, taglower); 4240 /* no unmap needed here because no data xfer. */ 4241 4242 ei = c->err_info; 4243 switch (ei->CommandStatus) { 4244 case CMD_SUCCESS: 4245 break; 4246 case CMD_UNABORTABLE: /* Very common, don't make noise. */ 4247 rc = -1; 4248 break; 4249 default: 4250 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n", 4251 __func__, tagupper, taglower); 4252 hpsa_scsi_interpret_error(h, c); 4253 rc = -1; 4254 break; 4255 } 4256 cmd_special_free(h, c); 4257 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", 4258 __func__, tagupper, taglower); 4259 return rc; 4260 } 4261 4262 /* 4263 * hpsa_find_cmd_in_queue 4264 * 4265 * Used to determine whether a command (find) is still present 4266 * in queue_head. Optionally excludes the last element of queue_head. 4267 * 4268 * This is used to avoid unnecessary aborts. Commands in h->reqQ have 4269 * not yet been submitted, and so can be aborted by the driver without 4270 * sending an abort to the hardware. 4271 * 4272 * Returns pointer to command if found in queue, NULL otherwise. 4273 */ 4274 static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h, 4275 struct scsi_cmnd *find, struct list_head *queue_head) 4276 { 4277 unsigned long flags; 4278 struct CommandList *c = NULL; /* ptr into cmpQ */ 4279 4280 if (!find) 4281 return NULL; 4282 spin_lock_irqsave(&h->lock, flags); 4283 list_for_each_entry(c, queue_head, list) { 4284 if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */ 4285 continue; 4286 if (c->scsi_cmd == find) { 4287 spin_unlock_irqrestore(&h->lock, flags); 4288 return c; 4289 } 4290 } 4291 spin_unlock_irqrestore(&h->lock, flags); 4292 return NULL; 4293 } 4294 4295 static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h, 4296 u8 *tag, struct list_head *queue_head) 4297 { 4298 unsigned long flags; 4299 struct CommandList *c; 4300 4301 spin_lock_irqsave(&h->lock, flags); 4302 list_for_each_entry(c, queue_head, list) { 4303 if (memcmp(&c->Header.tag, tag, 8) != 0) 4304 continue; 4305 spin_unlock_irqrestore(&h->lock, flags); 4306 return c; 4307 } 4308 spin_unlock_irqrestore(&h->lock, flags); 4309 return NULL; 4310 } 4311 4312 /* ioaccel2 path firmware cannot handle abort task requests. 4313 * Change abort requests to physical target reset, and send to the 4314 * address of the physical disk used for the ioaccel 2 command. 4315 * Return 0 on success (IO_OK) 4316 * -1 on failure 4317 */ 4318 4319 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, 4320 unsigned char *scsi3addr, struct CommandList *abort) 4321 { 4322 int rc = IO_OK; 4323 struct scsi_cmnd *scmd; /* scsi command within request being aborted */ 4324 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */ 4325 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */ 4326 unsigned char *psa = &phys_scsi3addr[0]; 4327 4328 /* Get a pointer to the hpsa logical device. 
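* (abort->scsi_cmd still points at the midlayer request; its device hostdata is the hpsa_scsi_dev_t that records whether Smart Path offload is enabled for the volume.)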
*/ 4329 scmd = (struct scsi_cmnd *) abort->scsi_cmd; 4330 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata); 4331 if (dev == NULL) { 4332 dev_warn(&h->pdev->dev, 4333 "Cannot abort: no device pointer for command.\n"); 4334 return -1; /* not abortable */ 4335 } 4336 4337 if (h->raid_offload_debug > 0) 4338 dev_info(&h->pdev->dev, 4339 "Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4340 h->scsi_host->host_no, dev->bus, dev->target, dev->lun, 4341 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3], 4342 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); 4343 4344 if (!dev->offload_enabled) { 4345 dev_warn(&h->pdev->dev, 4346 "Can't abort: device is not operating in HP SSD Smart Path mode.\n"); 4347 return -1; /* not abortable */ 4348 } 4349 4350 /* Incoming scsi3addr is logical addr. We need physical disk addr. */ 4351 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) { 4352 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n"); 4353 return -1; /* not abortable */ 4354 } 4355 4356 /* send the reset */ 4357 if (h->raid_offload_debug > 0) 4358 dev_info(&h->pdev->dev, 4359 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4360 psa[0], psa[1], psa[2], psa[3], 4361 psa[4], psa[5], psa[6], psa[7]); 4362 rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET); 4363 if (rc != 0) { 4364 dev_warn(&h->pdev->dev, 4365 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4366 psa[0], psa[1], psa[2], psa[3], 4367 psa[4], psa[5], psa[6], psa[7]); 4368 return rc; /* failed to reset */ 4369 } 4370 4371 /* wait for device to recover */ 4372 if (wait_for_device_to_become_ready(h, psa) != 0) { 4373 dev_warn(&h->pdev->dev, 4374 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4375 psa[0], psa[1], psa[2], psa[3], 4376 psa[4], psa[5], psa[6], psa[7]); 4377 return -1; /* failed to recover */ 4378 } 4379 4380 /* device recovered */ 4381 dev_info(&h->pdev->dev, 4382 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4383 psa[0], psa[1], psa[2], psa[3], 4384 psa[4], psa[5], psa[6], psa[7]); 4385 4386 return rc; /* success */ 4387 } 4388 4389 /* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to 4390 * tell which kind we're dealing with, so we send the abort both ways. There 4391 * shouldn't be any collisions between swizzled and unswizzled tags due to the 4392 * way we construct our tags but we check anyway in case the assumptions which 4393 * make this true someday become false. 4394 */ 4395 static int hpsa_send_abort_both_ways(struct ctlr_info *h, 4396 unsigned char *scsi3addr, struct CommandList *abort) 4397 { 4398 u8 swizzled_tag[8]; 4399 struct CommandList *c; 4400 int rc = 0, rc2 = 0; 4401 4402 /* ioaccelerator mode 2 commands should be aborted via the 4403 * accelerated path, since RAID path is unaware of these commands, 4404 * but underlying firmware can't handle abort TMF. 4405 * Change abort to physical device reset. 4406 */ 4407 if (abort->cmd_type == CMD_IOACCEL2) 4408 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort); 4409 4410 /* we do not expect to find the swizzled tag in our queue, but 4411 * check anyway just to be sure the assumptions which make this 4412 * the case haven't become wrong.
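* (For reference, swizzle_abort_tag() byte-reverses each 32-bit half of the tag independently: bytes 00 01 02 03 04 05 06 07 become 03 02 01 00 07 06 05 04.)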
4413 */ 4414 memcpy(swizzled_tag, &abort->Request.CDB[4], 8); 4415 swizzle_abort_tag(swizzled_tag); 4416 c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ); 4417 if (c != NULL) { 4418 dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n"); 4419 return hpsa_send_abort(h, scsi3addr, abort, 0); 4420 } 4421 rc = hpsa_send_abort(h, scsi3addr, abort, 0); 4422 4423 /* if the command is still in our queue, we can't conclude that it was 4424 * aborted (it might have just completed normally) but in any case 4425 * we don't need to try to abort it another way. 4426 */ 4427 c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ); 4428 if (c) 4429 rc2 = hpsa_send_abort(h, scsi3addr, abort, 1); 4430 return rc && rc2; 4431 } 4432 4433 /* Send an abort for the specified command. 4434 * If the device and controller support it, 4435 * send a task abort request. 4436 */ 4437 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) 4438 { 4439 4440 int i, rc; 4441 struct ctlr_info *h; 4442 struct hpsa_scsi_dev_t *dev; 4443 struct CommandList *abort; /* pointer to command to be aborted */ 4444 struct CommandList *found; 4445 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */ 4446 char msg[256]; /* For debug messaging. */ 4447 int ml = 0; 4448 __le32 tagupper, taglower; 4449 4450 /* Find the controller of the command to be aborted */ 4451 h = sdev_to_hba(sc->device); 4452 if (WARN(h == NULL, 4453 "ABORT REQUEST FAILED, Controller lookup failed.\n")) 4454 return FAILED; 4455 4456 /* Check that controller supports some kind of task abort */ 4457 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) && 4458 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) 4459 return FAILED; 4460 4461 memset(msg, 0, sizeof(msg)); 4462 ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%llu ", 4463 h->scsi_host->host_no, sc->device->channel, 4464 sc->device->id, sc->device->lun); 4465 4466 /* Find the device of the command to be aborted */ 4467 dev = sc->device->hostdata; 4468 if (!dev) { 4469 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n", 4470 msg); 4471 return FAILED; 4472 } 4473 4474 /* Get SCSI command to be aborted */ 4475 abort = (struct CommandList *) sc->host_scribble; 4476 if (abort == NULL) { 4477 dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n", 4478 msg); 4479 return FAILED; 4480 } 4481 hpsa_get_tag(h, abort, &taglower, &tagupper); 4482 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower); 4483 as = (struct scsi_cmnd *) abort->scsi_cmd; 4484 if (as != NULL) 4485 ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ", 4486 as->cmnd[0], as->serial_number); 4487 dev_dbg(&h->pdev->dev, "%s\n", msg); 4488 dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n", 4489 h->scsi_host->host_no, dev->bus, dev->target, dev->lun); 4490 4491 /* Search reqQ to see if command is queued but not submitted, 4492 * if so, complete the command with aborted status and remove 4493 * it from the reqQ.
4494 */ 4495 found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ); 4496 if (found) { 4497 found->err_info->CommandStatus = CMD_ABORTED; 4498 finish_cmd(found); 4499 dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n", 4500 msg); 4501 return SUCCESS; 4502 } 4503 4504 /* not in reqQ, if also not in cmpQ, must have already completed */ 4505 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ); 4506 if (!found) { 4507 dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n", 4508 msg); 4509 return SUCCESS; 4510 } 4511 4512 /* 4513 * Command is in flight, or possibly already completed 4514 * by the firmware (but not to the scsi mid layer) but we can't 4515 * distinguish which. Send the abort down. 4516 */ 4517 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort); 4518 if (rc != 0) { 4519 dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg); 4520 dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n", 4521 h->scsi_host->host_no, 4522 dev->bus, dev->target, dev->lun); 4523 return FAILED; 4524 } 4525 dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg); 4526 4527 /* If the abort(s) above completed and actually aborted the 4528 * command, then the command to be aborted should already be 4529 * completed. If not, wait around a bit more to see if they 4530 * manage to complete normally. 4531 */ 4532 #define ABORT_COMPLETE_WAIT_SECS 30 4533 for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) { 4534 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ); 4535 if (!found) 4536 return SUCCESS; 4537 msleep(100); 4538 } 4539 dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n", 4540 msg, ABORT_COMPLETE_WAIT_SECS); 4541 return FAILED; 4542 } 4543 4544 4545 /* 4546 * For operations that cannot sleep, a command block is allocated at init, 4547 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track 4548 * which ones are free or in use. Lock must be held when calling this. 4549 * cmd_free() is the complement. 4550 */ 4551 static struct CommandList *cmd_alloc(struct ctlr_info *h) 4552 { 4553 struct CommandList *c; 4554 int i; 4555 union u64bit temp64; 4556 dma_addr_t cmd_dma_handle, err_dma_handle; 4557 int loopcount; 4558 4559 /* There is some *extremely* small but non-zero chance that 4560 * multiple threads could get in here, and one thread could 4561 * be scanning through the list of bits looking for a free 4562 * one, but the free ones are always behind him, and other 4563 * threads sneak in behind him and eat them before he can 4564 * get to them, so that while there is always a free one, a 4565 * very unlucky thread might be starved anyway, never able to 4566 * beat the other threads. In reality, this happens so 4567 * infrequently as to be indistinguishable from never. 4568 */ 4569 4570 loopcount = 0; 4571 do { 4572 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); 4573 if (i == h->nr_cmds) 4574 i = 0; 4575 loopcount++; 4576 } while (test_and_set_bit(i & (BITS_PER_LONG - 1), 4577 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0 && 4578 loopcount < 10); 4579 4580 /* Thread got starved? We do not expect this to ever happen.
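* (Giving up after ten failed passes bounds the theoretical livelock described above; callers treat the NULL return as a transient allocation failure.)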
*/ 4581 if (loopcount >= 10) 4582 return NULL; 4583 4584 c = h->cmd_pool + i; 4585 memset(c, 0, sizeof(*c)); 4586 cmd_dma_handle = h->cmd_pool_dhandle 4587 + i * sizeof(*c); 4588 c->err_info = h->errinfo_pool + i; 4589 memset(c->err_info, 0, sizeof(*c->err_info)); 4590 err_dma_handle = h->errinfo_pool_dhandle 4591 + i * sizeof(*c->err_info); 4592 4593 c->cmdindex = i; 4594 4595 INIT_LIST_HEAD(&c->list); 4596 c->busaddr = (u32) cmd_dma_handle; 4597 temp64.val = (u64) err_dma_handle; 4598 c->ErrDesc.Addr = cpu_to_le64(err_dma_handle); 4599 c->ErrDesc.Len = cpu_to_le32(sizeof(*c->err_info)); 4600 4601 c->h = h; 4602 return c; 4603 } 4604 4605 /* For operations that can wait for kmalloc to possibly sleep, 4606 * this routine can be called. Lock need not be held to call 4607 * cmd_special_alloc. cmd_special_free() is the complement. 4608 */ 4609 static struct CommandList *cmd_special_alloc(struct ctlr_info *h) 4610 { 4611 struct CommandList *c; 4612 dma_addr_t cmd_dma_handle, err_dma_handle; 4613 4614 c = pci_zalloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle); 4615 if (c == NULL) 4616 return NULL; 4617 4618 c->cmd_type = CMD_SCSI; 4619 c->cmdindex = -1; 4620 4621 c->err_info = pci_zalloc_consistent(h->pdev, sizeof(*c->err_info), 4622 &err_dma_handle); 4623 4624 if (c->err_info == NULL) { 4625 pci_free_consistent(h->pdev, 4626 sizeof(*c), c, cmd_dma_handle); 4627 return NULL; 4628 } 4629 4630 INIT_LIST_HEAD(&c->list); 4631 c->busaddr = (u32) cmd_dma_handle; 4632 c->ErrDesc.Addr = cpu_to_le64(err_dma_handle); 4633 c->ErrDesc.Len = cpu_to_le32(sizeof(*c->err_info)); 4634 4635 c->h = h; 4636 return c; 4637 } 4638 4639 static void cmd_free(struct ctlr_info *h, struct CommandList *c) 4640 { 4641 int i; 4642 4643 i = c - h->cmd_pool; 4644 clear_bit(i & (BITS_PER_LONG - 1), 4645 h->cmd_pool_bits + (i / BITS_PER_LONG)); 4646 } 4647 4648 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c) 4649 { 4650 pci_free_consistent(h->pdev, sizeof(*c->err_info), 4651 c->err_info, 4652 (dma_addr_t) le64_to_cpu(c->ErrDesc.Addr)); 4653 pci_free_consistent(h->pdev, sizeof(*c), 4654 c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK)); 4655 } 4656 4657 #ifdef CONFIG_COMPAT 4658 4659 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, 4660 void __user *arg) 4661 { 4662 IOCTL32_Command_struct __user *arg32 = 4663 (IOCTL32_Command_struct __user *) arg; 4664 IOCTL_Command_struct arg64; 4665 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); 4666 int err; 4667 u32 cp; 4668 4669 memset(&arg64, 0, sizeof(arg64)); 4670 err = 0; 4671 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 4672 sizeof(arg64.LUN_info)); 4673 err |= copy_from_user(&arg64.Request, &arg32->Request, 4674 sizeof(arg64.Request)); 4675 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 4676 sizeof(arg64.error_info)); 4677 err |= get_user(arg64.buf_size, &arg32->buf_size); 4678 err |= get_user(cp, &arg32->buf); 4679 arg64.buf = compat_ptr(cp); 4680 err |= copy_to_user(p, &arg64, sizeof(arg64)); 4681 4682 if (err) 4683 return -EFAULT; 4684 4685 err = hpsa_ioctl(dev, CCISS_PASSTHRU, p); 4686 if (err) 4687 return err; 4688 err |= copy_in_user(&arg32->error_info, &p->error_info, 4689 sizeof(arg32->error_info)); 4690 if (err) 4691 return -EFAULT; 4692 return err; 4693 } 4694 4695 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, 4696 int cmd, void __user *arg) 4697 { 4698 BIG_IOCTL32_Command_struct __user *arg32 = 4699 (BIG_IOCTL32_Command_struct __user *) arg; 4700 
BIG_IOCTL_Command_struct arg64; 4701 BIG_IOCTL_Command_struct __user *p = 4702 compat_alloc_user_space(sizeof(arg64)); 4703 int err; 4704 u32 cp; 4705 4706 memset(&arg64, 0, sizeof(arg64)); 4707 err = 0; 4708 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 4709 sizeof(arg64.LUN_info)); 4710 err |= copy_from_user(&arg64.Request, &arg32->Request, 4711 sizeof(arg64.Request)); 4712 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 4713 sizeof(arg64.error_info)); 4714 err |= get_user(arg64.buf_size, &arg32->buf_size); 4715 err |= get_user(arg64.malloc_size, &arg32->malloc_size); 4716 err |= get_user(cp, &arg32->buf); 4717 arg64.buf = compat_ptr(cp); 4718 err |= copy_to_user(p, &arg64, sizeof(arg64)); 4719 4720 if (err) 4721 return -EFAULT; 4722 4723 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p); 4724 if (err) 4725 return err; 4726 err |= copy_in_user(&arg32->error_info, &p->error_info, 4727 sizeof(arg32->error_info)); 4728 if (err) 4729 return -EFAULT; 4730 return err; 4731 } 4732 4733 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg) 4734 { 4735 switch (cmd) { 4736 case CCISS_GETPCIINFO: 4737 case CCISS_GETINTINFO: 4738 case CCISS_SETINTINFO: 4739 case CCISS_GETNODENAME: 4740 case CCISS_SETNODENAME: 4741 case CCISS_GETHEARTBEAT: 4742 case CCISS_GETBUSTYPES: 4743 case CCISS_GETFIRMVER: 4744 case CCISS_GETDRIVVER: 4745 case CCISS_REVALIDVOLS: 4746 case CCISS_DEREGDISK: 4747 case CCISS_REGNEWDISK: 4748 case CCISS_REGNEWD: 4749 case CCISS_RESCANDISK: 4750 case CCISS_GETLUNINFO: 4751 return hpsa_ioctl(dev, cmd, arg); 4752 4753 case CCISS_PASSTHRU32: 4754 return hpsa_ioctl32_passthru(dev, cmd, arg); 4755 case CCISS_BIG_PASSTHRU32: 4756 return hpsa_ioctl32_big_passthru(dev, cmd, arg); 4757 4758 default: 4759 return -ENOIOCTLCMD; 4760 } 4761 } 4762 #endif 4763 4764 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) 4765 { 4766 struct hpsa_pci_info pciinfo; 4767 4768 if (!argp) 4769 return -EINVAL; 4770 pciinfo.domain = pci_domain_nr(h->pdev->bus); 4771 pciinfo.bus = h->pdev->bus->number; 4772 pciinfo.dev_fn = h->pdev->devfn; 4773 pciinfo.board_id = h->board_id; 4774 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo))) 4775 return -EFAULT; 4776 return 0; 4777 } 4778 4779 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) 4780 { 4781 DriverVer_type DriverVer; 4782 unsigned char vmaj, vmin, vsubmin; 4783 int rc; 4784 4785 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu", 4786 &vmaj, &vmin, &vsubmin); 4787 if (rc != 3) { 4788 dev_info(&h->pdev->dev, "driver version string '%s' " 4789 "unrecognized.", HPSA_DRIVER_VERSION); 4790 vmaj = 0; 4791 vmin = 0; 4792 vsubmin = 0; 4793 } 4794 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin; 4795 if (!argp) 4796 return -EINVAL; 4797 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type))) 4798 return -EFAULT; 4799 return 0; 4800 } 4801 4802 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) 4803 { 4804 IOCTL_Command_struct iocommand; 4805 struct CommandList *c; 4806 char *buff = NULL; 4807 u64 temp64; 4808 int rc = 0; 4809 4810 if (!argp) 4811 return -EINVAL; 4812 if (!capable(CAP_SYS_RAWIO)) 4813 return -EPERM; 4814 if (copy_from_user(&iocommand, argp, sizeof(iocommand))) 4815 return -EFAULT; 4816 if ((iocommand.buf_size < 1) && 4817 (iocommand.Request.Type.Direction != XFER_NONE)) { 4818 return -EINVAL; 4819 } 4820 if (iocommand.buf_size > 0) { 4821 buff = kmalloc(iocommand.buf_size, GFP_KERNEL); 4822 if (buff == NULL) 4823 return -EFAULT; 4824 if 
(iocommand.Request.Type.Direction & XFER_WRITE) { 4825 /* Copy the data into the buffer we created */ 4826 if (copy_from_user(buff, iocommand.buf, 4827 iocommand.buf_size)) { 4828 rc = -EFAULT; 4829 goto out_kfree; 4830 } 4831 } else { 4832 memset(buff, 0, iocommand.buf_size); 4833 } 4834 } 4835 c = cmd_special_alloc(h); 4836 if (c == NULL) { 4837 rc = -ENOMEM; 4838 goto out_kfree; 4839 } 4840 /* Fill in the command type */ 4841 c->cmd_type = CMD_IOCTL_PEND; 4842 /* Fill in Command Header */ 4843 c->Header.ReplyQueue = 0; /* unused in simple mode */ 4844 if (iocommand.buf_size > 0) { /* buffer to fill */ 4845 c->Header.SGList = 1; 4846 c->Header.SGTotal = cpu_to_le16(1); 4847 } else { /* no buffers to fill */ 4848 c->Header.SGList = 0; 4849 c->Header.SGTotal = cpu_to_le16(0); 4850 } 4851 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); 4852 /* use the kernel address the cmd block for tag */ 4853 c->Header.tag = cpu_to_le64(c->busaddr); 4854 4855 /* Fill in Request block */ 4856 memcpy(&c->Request, &iocommand.Request, 4857 sizeof(c->Request)); 4858 4859 /* Fill in the scatter gather information */ 4860 if (iocommand.buf_size > 0) { 4861 temp64 = pci_map_single(h->pdev, buff, 4862 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); 4863 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) { 4864 c->SG[0].Addr = cpu_to_le64(0); 4865 c->SG[0].Len = cpu_to_le32(0); 4866 rc = -ENOMEM; 4867 goto out; 4868 } 4869 c->SG[0].Addr = cpu_to_le64(temp64); 4870 c->SG[0].Len = cpu_to_le32(iocommand.buf_size); 4871 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ 4872 } 4873 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); 4874 if (iocommand.buf_size > 0) 4875 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); 4876 check_ioctl_unit_attention(h, c); 4877 4878 /* Copy the error information out */ 4879 memcpy(&iocommand.error_info, c->err_info, 4880 sizeof(iocommand.error_info)); 4881 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) { 4882 rc = -EFAULT; 4883 goto out; 4884 } 4885 if ((iocommand.Request.Type.Direction & XFER_READ) && 4886 iocommand.buf_size > 0) { 4887 /* Copy the data out of the buffer we created */ 4888 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { 4889 rc = -EFAULT; 4890 goto out; 4891 } 4892 } 4893 out: 4894 cmd_special_free(h, c); 4895 out_kfree: 4896 kfree(buff); 4897 return rc; 4898 } 4899 4900 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) 4901 { 4902 BIG_IOCTL_Command_struct *ioc; 4903 struct CommandList *c; 4904 unsigned char **buff = NULL; 4905 int *buff_size = NULL; 4906 u64 temp64; 4907 BYTE sg_used = 0; 4908 int status = 0; 4909 u32 left; 4910 u32 sz; 4911 BYTE __user *data_ptr; 4912 4913 if (!argp) 4914 return -EINVAL; 4915 if (!capable(CAP_SYS_RAWIO)) 4916 return -EPERM; 4917 ioc = (BIG_IOCTL_Command_struct *) 4918 kmalloc(sizeof(*ioc), GFP_KERNEL); 4919 if (!ioc) { 4920 status = -ENOMEM; 4921 goto cleanup1; 4922 } 4923 if (copy_from_user(ioc, argp, sizeof(*ioc))) { 4924 status = -EFAULT; 4925 goto cleanup1; 4926 } 4927 if ((ioc->buf_size < 1) && 4928 (ioc->Request.Type.Direction != XFER_NONE)) { 4929 status = -EINVAL; 4930 goto cleanup1; 4931 } 4932 /* Check kmalloc limits using all SGs */ 4933 if (ioc->malloc_size > MAX_KMALLOC_SIZE) { 4934 status = -EINVAL; 4935 goto cleanup1; 4936 } 4937 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) { 4938 status = -EINVAL; 4939 goto cleanup1; 4940 } 4941 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL); 4942 if (!buff) { 4943 status = 
-ENOMEM; 4944 goto cleanup1; 4945 } 4946 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL); 4947 if (!buff_size) { 4948 status = -ENOMEM; 4949 goto cleanup1; 4950 } 4951 left = ioc->buf_size; 4952 data_ptr = ioc->buf; 4953 while (left) { 4954 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left; 4955 buff_size[sg_used] = sz; 4956 buff[sg_used] = kmalloc(sz, GFP_KERNEL); 4957 if (buff[sg_used] == NULL) { 4958 status = -ENOMEM; 4959 goto cleanup1; 4960 } 4961 if (ioc->Request.Type.Direction & XFER_WRITE) { 4962 if (copy_from_user(buff[sg_used], data_ptr, sz)) { 4963 status = -EFAULT; 4964 goto cleanup1; 4965 } 4966 } else 4967 memset(buff[sg_used], 0, sz); 4968 left -= sz; 4969 data_ptr += sz; 4970 sg_used++; 4971 } 4972 c = cmd_special_alloc(h); 4973 if (c == NULL) { 4974 status = -ENOMEM; 4975 goto cleanup1; 4976 } 4977 c->cmd_type = CMD_IOCTL_PEND; 4978 c->Header.ReplyQueue = 0; 4979 c->Header.SGList = (u8) sg_used; 4980 c->Header.SGTotal = cpu_to_le16(sg_used); 4981 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); 4982 c->Header.tag = cpu_to_le64(c->busaddr); 4983 memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); 4984 if (ioc->buf_size > 0) { 4985 int i; 4986 for (i = 0; i < sg_used; i++) { 4987 temp64 = pci_map_single(h->pdev, buff[i], 4988 buff_size[i], PCI_DMA_BIDIRECTIONAL); 4989 if (dma_mapping_error(&h->pdev->dev, 4990 (dma_addr_t) temp64)) { 4991 c->SG[i].Addr = cpu_to_le64(0); 4992 c->SG[i].Len = cpu_to_le32(0); 4993 hpsa_pci_unmap(h->pdev, c, i, 4994 PCI_DMA_BIDIRECTIONAL); 4995 status = -ENOMEM; 4996 goto cleanup0; 4997 } 4998 c->SG[i].Addr = cpu_to_le64(temp64); 4999 c->SG[i].Len = cpu_to_le32(buff_size[i]); 5000 c->SG[i].Ext = cpu_to_le32(0); 5001 } 5002 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST); 5003 } 5004 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); 5005 if (sg_used) 5006 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); 5007 check_ioctl_unit_attention(h, c); 5008 /* Copy the error information out */ 5009 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); 5010 if (copy_to_user(argp, ioc, sizeof(*ioc))) { 5011 status = -EFAULT; 5012 goto cleanup0; 5013 } 5014 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) { 5015 int i; 5016 5017 /* Copy the data out of the buffer we created */ 5018 BYTE __user *ptr = ioc->buf; 5019 for (i = 0; i < sg_used; i++) { 5020 if (copy_to_user(ptr, buff[i], buff_size[i])) { 5021 status = -EFAULT; 5022 goto cleanup0; 5023 } 5024 ptr += buff_size[i]; 5025 } 5026 } 5027 status = 0; 5028 cleanup0: 5029 cmd_special_free(h, c); 5030 cleanup1: 5031 if (buff) { 5032 int i; 5033 5034 for (i = 0; i < sg_used; i++) 5035 kfree(buff[i]); 5036 kfree(buff); 5037 } 5038 kfree(buff_size); 5039 kfree(ioc); 5040 return status; 5041 } 5042 5043 static void check_ioctl_unit_attention(struct ctlr_info *h, 5044 struct CommandList *c) 5045 { 5046 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 5047 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) 5048 (void) check_for_unit_attention(h, c); 5049 } 5050 5051 static int increment_passthru_count(struct ctlr_info *h) 5052 { 5053 unsigned long flags; 5054 5055 spin_lock_irqsave(&h->passthru_count_lock, flags); 5056 if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) { 5057 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 5058 return -1; 5059 } 5060 h->passthru_count++; 5061 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 5062 return 0; 5063 } 5064 5065 static void decrement_passthru_count(struct ctlr_info *h) 
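/* Complement of increment_passthru_count() above: together they cap the number of concurrent passthru ioctls at HPSA_MAX_CONCURRENT_PASSTHRUS. */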
5066 { 5067 unsigned long flags; 5068 5069 spin_lock_irqsave(&h->passthru_count_lock, flags); 5070 if (h->passthru_count <= 0) { 5071 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 5072 /* not expecting to get here. */ 5073 dev_warn(&h->pdev->dev, "Bug detected, passthru_count seems to be incorrect.\n"); 5074 return; 5075 } 5076 h->passthru_count--; 5077 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 5078 } 5079 5080 /* 5081 * ioctl 5082 */ 5083 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg) 5084 { 5085 struct ctlr_info *h; 5086 void __user *argp = (void __user *)arg; 5087 int rc; 5088 5089 h = sdev_to_hba(dev); 5090 5091 switch (cmd) { 5092 case CCISS_DEREGDISK: 5093 case CCISS_REGNEWDISK: 5094 case CCISS_REGNEWD: 5095 hpsa_scan_start(h->scsi_host); 5096 return 0; 5097 case CCISS_GETPCIINFO: 5098 return hpsa_getpciinfo_ioctl(h, argp); 5099 case CCISS_GETDRIVVER: 5100 return hpsa_getdrivver_ioctl(h, argp); 5101 case CCISS_PASSTHRU: 5102 if (increment_passthru_count(h)) 5103 return -EAGAIN; 5104 rc = hpsa_passthru_ioctl(h, argp); 5105 decrement_passthru_count(h); 5106 return rc; 5107 case CCISS_BIG_PASSTHRU: 5108 if (increment_passthru_count(h)) 5109 return -EAGAIN; 5110 rc = hpsa_big_passthru_ioctl(h, argp); 5111 decrement_passthru_count(h); 5112 return rc; 5113 default: 5114 return -ENOTTY; 5115 } 5116 } 5117 5118 static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, 5119 u8 reset_type) 5120 { 5121 struct CommandList *c; 5122 5123 c = cmd_alloc(h); 5124 if (!c) 5125 return -ENOMEM; 5126 /* fill_cmd can't fail here, no data buffer to map */ 5127 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, 5128 RAID_CTLR_LUNID, TYPE_MSG); 5129 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */ 5130 c->waiting = NULL; 5131 enqueue_cmd_and_start_io(h, c); 5132 /* Don't wait for completion, the reset won't complete. Don't free 5133 * the command either. This is the last command we will send before 5134 * re-initializing everything, so it doesn't matter and won't leak. 5135 */ 5136 return 0; 5137 } 5138 5139 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, 5140 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, 5141 int cmd_type) 5142 { 5143 int pci_dir = XFER_NONE; 5144 struct CommandList *a; /* for commands to be aborted */ 5145 5146 c->cmd_type = CMD_IOCTL_PEND; 5147 c->Header.ReplyQueue = 0; 5148 if (buff != NULL && size > 0) { 5149 c->Header.SGList = 1; 5150 c->Header.SGTotal = cpu_to_le16(1); 5151 } else { 5152 c->Header.SGList = 0; 5153 c->Header.SGTotal = cpu_to_le16(0); 5154 } 5155 c->Header.tag = cpu_to_le64(c->busaddr); 5156 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); 5157 5158 if (cmd_type == TYPE_CMD) { 5159 switch (cmd) { 5160 case HPSA_INQUIRY: 5161 /* are we trying to read a vital product page */ 5162 if (page_code & VPD_PAGE) { 5163 c->Request.CDB[1] = 0x01; 5164 c->Request.CDB[2] = (page_code & 0xff); 5165 } 5166 c->Request.CDBLen = 6; 5167 c->Request.type_attr_dir = 5168 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 5169 c->Request.Timeout = 0; 5170 c->Request.CDB[0] = HPSA_INQUIRY; 5171 c->Request.CDB[4] = size & 0xFF; 5172 break; 5173 case HPSA_REPORT_LOG: 5174 case HPSA_REPORT_PHYS: 5175 /* Talking to controller so it's a physical command 5176 mode = 00 target = 0. Nothing to write.
5177 */ 5178 c->Request.CDBLen = 12; 5179 c->Request.type_attr_dir = 5180 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 5181 c->Request.Timeout = 0; 5182 c->Request.CDB[0] = cmd; 5183 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ 5184 c->Request.CDB[7] = (size >> 16) & 0xFF; 5185 c->Request.CDB[8] = (size >> 8) & 0xFF; 5186 c->Request.CDB[9] = size & 0xFF; 5187 break; 5188 case HPSA_CACHE_FLUSH: 5189 c->Request.CDBLen = 12; 5190 c->Request.type_attr_dir = 5191 TYPE_ATTR_DIR(cmd_type, 5192 ATTR_SIMPLE, XFER_WRITE); 5193 c->Request.Timeout = 0; 5194 c->Request.CDB[0] = BMIC_WRITE; 5195 c->Request.CDB[6] = BMIC_CACHE_FLUSH; 5196 c->Request.CDB[7] = (size >> 8) & 0xFF; 5197 c->Request.CDB[8] = size & 0xFF; 5198 break; 5199 case TEST_UNIT_READY: 5200 c->Request.CDBLen = 6; 5201 c->Request.type_attr_dir = 5202 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); 5203 c->Request.Timeout = 0; 5204 break; 5205 case HPSA_GET_RAID_MAP: 5206 c->Request.CDBLen = 12; 5207 c->Request.type_attr_dir = 5208 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 5209 c->Request.Timeout = 0; 5210 c->Request.CDB[0] = HPSA_CISS_READ; 5211 c->Request.CDB[1] = cmd; 5212 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ 5213 c->Request.CDB[7] = (size >> 16) & 0xFF; 5214 c->Request.CDB[8] = (size >> 8) & 0xFF; 5215 c->Request.CDB[9] = size & 0xFF; 5216 break; 5217 case BMIC_SENSE_CONTROLLER_PARAMETERS: 5218 c->Request.CDBLen = 10; 5219 c->Request.type_attr_dir = 5220 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 5221 c->Request.Timeout = 0; 5222 c->Request.CDB[0] = BMIC_READ; 5223 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS; 5224 c->Request.CDB[7] = (size >> 16) & 0xFF; 5225 c->Request.CDB[8] = (size >> 8) & 0xFF; 5226 break; 5227 default: 5228 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd); 5229 BUG(); 5230 return -1; 5231 } 5232 } else if (cmd_type == TYPE_MSG) { 5233 switch (cmd) { 5234 5235 case HPSA_DEVICE_RESET_MSG: 5236 c->Request.CDBLen = 16; 5237 c->Request.type_attr_dir = 5238 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); 5239 c->Request.Timeout = 0; /* Don't time out */ 5240 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); 5241 c->Request.CDB[0] = cmd; 5242 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN; 5243 /* If bytes 4-7 are zero, it means reset the */ 5244 /* LunID device */ 5245 c->Request.CDB[4] = 0x00; 5246 c->Request.CDB[5] = 0x00; 5247 c->Request.CDB[6] = 0x00; 5248 c->Request.CDB[7] = 0x00; 5249 break; 5250 case HPSA_ABORT_MSG: 5251 a = buff; /* point to command to be aborted */ 5252 dev_dbg(&h->pdev->dev, 5253 "Abort Tag:0x%016llx request Tag:0x%016llx", 5254 a->Header.tag, c->Header.tag); 5255 c->Request.CDBLen = 16; 5256 c->Request.type_attr_dir = 5257 TYPE_ATTR_DIR(cmd_type, 5258 ATTR_SIMPLE, XFER_WRITE); 5259 c->Request.Timeout = 0; /* Don't time out */ 5260 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT; 5261 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK; 5262 c->Request.CDB[2] = 0x00; /* reserved */ 5263 c->Request.CDB[3] = 0x00; /* reserved */ 5264 /* Tag to abort goes in CDB[4]-CDB[11] */ 5265 memcpy(&c->Request.CDB[4], &a->Header.tag, 5266 sizeof(a->Header.tag)); 5267 c->Request.CDB[12] = 0x00; /* reserved */ 5268 c->Request.CDB[13] = 0x00; /* reserved */ 5269 c->Request.CDB[14] = 0x00; /* reserved */ 5270 c->Request.CDB[15] = 0x00; /* reserved */ 5271 break; 5272 default: 5273 dev_warn(&h->pdev->dev, "unknown message type %d\n", 5274 cmd); 5275 BUG(); 5276 } 5277 } else { 5278 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); 5279 BUG(); 5280 } 5281 5282
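/* Translate the transfer direction just encoded in type_attr_dir into the matching PCI DMA direction for the single hpsa_map_one() mapping below. */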
switch (GET_DIR(c->Request.type_attr_dir)) { 5283 case XFER_READ: 5284 pci_dir = PCI_DMA_FROMDEVICE; 5285 break; 5286 case XFER_WRITE: 5287 pci_dir = PCI_DMA_TODEVICE; 5288 break; 5289 case XFER_NONE: 5290 pci_dir = PCI_DMA_NONE; 5291 break; 5292 default: 5293 pci_dir = PCI_DMA_BIDIRECTIONAL; 5294 } 5295 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir)) 5296 return -1; 5297 return 0; 5298 } 5299 5300 /* 5301 * Map (physical) PCI mem into (virtual) kernel space 5302 */ 5303 static void __iomem *remap_pci_mem(ulong base, ulong size) 5304 { 5305 ulong page_base = ((ulong) base) & PAGE_MASK; 5306 ulong page_offs = ((ulong) base) - page_base; 5307 void __iomem *page_remapped = ioremap_nocache(page_base, 5308 page_offs + size); 5309 5310 return page_remapped ? (page_remapped + page_offs) : NULL; 5311 } 5312 5313 /* Takes cmds off the submission queue and sends them to the hardware, 5314 * then puts them on the queue of cmds waiting for completion. 5315 * Assumes h->lock is held 5316 */ 5317 static void start_io(struct ctlr_info *h, unsigned long *flags) 5318 { 5319 struct CommandList *c; 5320 5321 while (!list_empty(&h->reqQ)) { 5322 c = list_entry(h->reqQ.next, struct CommandList, list); 5323 /* can't do anything if fifo is full */ 5324 if ((h->access.fifo_full(h))) { 5325 h->fifo_recently_full = 1; 5326 dev_warn(&h->pdev->dev, "fifo full\n"); 5327 break; 5328 } 5329 h->fifo_recently_full = 0; 5330 5331 /* Get the first entry from the Request Q */ 5332 removeQ(c); 5333 h->Qdepth--; 5334 5335 /* Put job onto the completed Q */ 5336 addQ(&h->cmpQ, c); 5337 atomic_inc(&h->commands_outstanding); 5338 spin_unlock_irqrestore(&h->lock, *flags); 5339 /* Tell the controller execute command */ 5340 h->access.submit_command(h, c); 5341 spin_lock_irqsave(&h->lock, *flags); 5342 } 5343 } 5344 5345 static void lock_and_start_io(struct ctlr_info *h) 5346 { 5347 unsigned long flags; 5348 5349 spin_lock_irqsave(&h->lock, flags); 5350 start_io(h, &flags); 5351 spin_unlock_irqrestore(&h->lock, flags); 5352 } 5353 5354 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q) 5355 { 5356 return h->access.command_completed(h, q); 5357 } 5358 5359 static inline bool interrupt_pending(struct ctlr_info *h) 5360 { 5361 return h->access.intr_pending(h); 5362 } 5363 5364 static inline long interrupt_not_for_us(struct ctlr_info *h) 5365 { 5366 return (h->access.intr_pending(h) == 0) || 5367 (h->interrupts_enabled == 0); 5368 } 5369 5370 static inline int bad_tag(struct ctlr_info *h, u32 tag_index, 5371 u32 raw_tag) 5372 { 5373 if (unlikely(tag_index >= h->nr_cmds)) { 5374 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); 5375 return 1; 5376 } 5377 return 0; 5378 } 5379 5380 static inline void finish_cmd(struct CommandList *c) 5381 { 5382 unsigned long flags; 5383 int io_may_be_stalled = 0; 5384 struct ctlr_info *h = c->h; 5385 int count; 5386 5387 spin_lock_irqsave(&h->lock, flags); 5388 removeQ(c); 5389 5390 /* 5391 * Check for possibly stalled i/o. 5392 * 5393 * If a fifo_full condition is encountered, requests will back up 5394 * in h->reqQ. This queue is only emptied out by start_io which is 5395 * only called when a new i/o request comes in. If no i/o's are 5396 * forthcoming, the i/o's in h->reqQ can get stuck. So we call 5397 * start_io from here if we detect such a danger. 5398 * 5399 * Normally, we shouldn't hit this case, but pounding on the 5400 * CCISS_PASSTHRU ioctl can provoke it. Only call start_io if 5401 * commands_outstanding is low. 
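* (Concretely, the check below treats a recent fifo-full event combined with fewer than five commands still outstanding as the danger sign.)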
	 * We want to avoid calling start_io from in here as much as
	 * possible, and especially don't want to get into a cycle where
	 * we call start_io every time through here.
	 */
	count = atomic_read(&h->commands_outstanding);
	spin_unlock_irqrestore(&h->lock, flags);
	if (unlikely(h->fifo_recently_full) && count < 5)
		io_may_be_stalled = 1;

	dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
	if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
			|| c->cmd_type == CMD_IOACCEL2))
		complete_scsi_command(c);
	else if (c->cmd_type == CMD_IOCTL_PEND)
		complete(c->waiting);
	if (unlikely(io_may_be_stalled))
		lock_and_start_io(h);
}

static inline u32 hpsa_tag_contains_index(u32 tag)
{
	return tag & DIRECT_LOOKUP_BIT;
}

static inline u32 hpsa_tag_to_index(u32 tag)
{
	return tag >> DIRECT_LOOKUP_SHIFT;
}


static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
{
#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
#define HPSA_SIMPLE_ERROR_BITS 0x03
	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return tag & ~HPSA_SIMPLE_ERROR_BITS;
	return tag & ~HPSA_PERF_ERROR_BITS;
}

/* process completion of an indexed ("direct lookup") command */
static inline void process_indexed_cmd(struct ctlr_info *h,
	u32 raw_tag)
{
	u32 tag_index;
	struct CommandList *c;

	tag_index = hpsa_tag_to_index(raw_tag);
	if (!bad_tag(h, tag_index, raw_tag)) {
		c = h->cmd_pool + tag_index;
		finish_cmd(c);
	}
}

/* process completion of a non-indexed command */
static inline void process_nonindexed_cmd(struct ctlr_info *h,
	u32 raw_tag)
{
	u32 tag;
	struct CommandList *c = NULL;
	unsigned long flags;

	tag = hpsa_tag_discard_error_bits(h, raw_tag);
	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(c, &h->cmpQ, list) {
		if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
			spin_unlock_irqrestore(&h->lock, flags);
			finish_cmd(c);
			return;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	bad_tag(h, h->nr_cmds + 1, raw_tag);
}

/* Some controllers, like p400, will give us one interrupt
 * after a soft reset, even if we turned interrupts off.
 * Only need to check for this in the hpsa_xxx_discard_completions
 * functions.
 */
static int ignore_bogus_interrupt(struct ctlr_info *h)
{
	if (likely(!reset_devices))
		return 0;

	if (likely(h->interrupts_enabled))
		return 0;

	dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
		"(known firmware bug.)  Ignoring.\n");

	return 1;
}

/*
 * Convert &h->q[x] (passed to interrupt handlers) back to h.
 * Relies on (h->q[x] == x) being true for x such that
 * 0 <= x < MAX_REPLY_QUEUES.
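 * A sketch of the arithmetic, assuming h->q[x] == x: if 'queue' points
 * at h->q[2], then *queue == 2, so (queue - *queue) == &h->q[0], and
 * container_of() walks back from q[0] to the enclosing ctlr_info.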
5499 */ 5500 static struct ctlr_info *queue_to_hba(u8 *queue) 5501 { 5502 return container_of((queue - *queue), struct ctlr_info, q[0]); 5503 } 5504 5505 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue) 5506 { 5507 struct ctlr_info *h = queue_to_hba(queue); 5508 u8 q = *(u8 *) queue; 5509 u32 raw_tag; 5510 5511 if (ignore_bogus_interrupt(h)) 5512 return IRQ_NONE; 5513 5514 if (interrupt_not_for_us(h)) 5515 return IRQ_NONE; 5516 h->last_intr_timestamp = get_jiffies_64(); 5517 while (interrupt_pending(h)) { 5518 raw_tag = get_next_completion(h, q); 5519 while (raw_tag != FIFO_EMPTY) 5520 raw_tag = next_command(h, q); 5521 } 5522 return IRQ_HANDLED; 5523 } 5524 5525 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue) 5526 { 5527 struct ctlr_info *h = queue_to_hba(queue); 5528 u32 raw_tag; 5529 u8 q = *(u8 *) queue; 5530 5531 if (ignore_bogus_interrupt(h)) 5532 return IRQ_NONE; 5533 5534 h->last_intr_timestamp = get_jiffies_64(); 5535 raw_tag = get_next_completion(h, q); 5536 while (raw_tag != FIFO_EMPTY) 5537 raw_tag = next_command(h, q); 5538 return IRQ_HANDLED; 5539 } 5540 5541 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue) 5542 { 5543 struct ctlr_info *h = queue_to_hba((u8 *) queue); 5544 u32 raw_tag; 5545 u8 q = *(u8 *) queue; 5546 5547 if (interrupt_not_for_us(h)) 5548 return IRQ_NONE; 5549 h->last_intr_timestamp = get_jiffies_64(); 5550 while (interrupt_pending(h)) { 5551 raw_tag = get_next_completion(h, q); 5552 while (raw_tag != FIFO_EMPTY) { 5553 if (likely(hpsa_tag_contains_index(raw_tag))) 5554 process_indexed_cmd(h, raw_tag); 5555 else 5556 process_nonindexed_cmd(h, raw_tag); 5557 raw_tag = next_command(h, q); 5558 } 5559 } 5560 return IRQ_HANDLED; 5561 } 5562 5563 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue) 5564 { 5565 struct ctlr_info *h = queue_to_hba(queue); 5566 u32 raw_tag; 5567 u8 q = *(u8 *) queue; 5568 5569 h->last_intr_timestamp = get_jiffies_64(); 5570 raw_tag = get_next_completion(h, q); 5571 while (raw_tag != FIFO_EMPTY) { 5572 if (likely(hpsa_tag_contains_index(raw_tag))) 5573 process_indexed_cmd(h, raw_tag); 5574 else 5575 process_nonindexed_cmd(h, raw_tag); 5576 raw_tag = next_command(h, q); 5577 } 5578 return IRQ_HANDLED; 5579 } 5580 5581 /* Send a message CDB to the firmware. Careful, this only works 5582 * in simple mode, not performant mode due to the tag lookup. 5583 * We only ever use this immediately after a controller reset. 5584 */ 5585 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, 5586 unsigned char type) 5587 { 5588 struct Command { 5589 struct CommandListHeader CommandHeader; 5590 struct RequestBlock Request; 5591 struct ErrDescriptor ErrorDescriptor; 5592 }; 5593 struct Command *cmd; 5594 static const size_t cmd_sz = sizeof(*cmd) + 5595 sizeof(cmd->ErrorDescriptor); 5596 dma_addr_t paddr64; 5597 __le32 paddr32; 5598 u32 tag; 5599 void __iomem *vaddr; 5600 int i, err; 5601 5602 vaddr = pci_ioremap_bar(pdev, 0); 5603 if (vaddr == NULL) 5604 return -ENOMEM; 5605 5606 /* The Inbound Post Queue only accepts 32-bit physical addresses for the 5607 * CCISS commands, so they must be allocated from the lower 4GiB of 5608 * memory. 5609 */ 5610 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 5611 if (err) { 5612 iounmap(vaddr); 5613 return -ENOMEM; 5614 } 5615 5616 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); 5617 if (cmd == NULL) { 5618 iounmap(vaddr); 5619 return -ENOMEM; 5620 } 5621 5622 /* This must fit, because of the 32-bit consistent DMA mask. 
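	 * (The pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) call
	 * above is what guarantees paddr64 fits in 32 bits, so the
	 * truncation to paddr32 below loses nothing.)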
Also, 5623 * although there's no guarantee, we assume that the address is at 5624 * least 4-byte aligned (most likely, it's page-aligned). 5625 */ 5626 paddr32 = cpu_to_le32(paddr64); 5627 5628 cmd->CommandHeader.ReplyQueue = 0; 5629 cmd->CommandHeader.SGList = 0; 5630 cmd->CommandHeader.SGTotal = cpu_to_le16(0); 5631 cmd->CommandHeader.tag = cpu_to_le64(paddr64); 5632 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); 5633 5634 cmd->Request.CDBLen = 16; 5635 cmd->Request.type_attr_dir = 5636 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE); 5637 cmd->Request.Timeout = 0; /* Don't time out */ 5638 cmd->Request.CDB[0] = opcode; 5639 cmd->Request.CDB[1] = type; 5640 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ 5641 cmd->ErrorDescriptor.Addr = 5642 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd))); 5643 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo)); 5644 5645 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET); 5646 5647 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { 5648 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); 5649 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64) 5650 break; 5651 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); 5652 } 5653 5654 iounmap(vaddr); 5655 5656 /* we leak the DMA buffer here ... no choice since the controller could 5657 * still complete the command. 5658 */ 5659 if (i == HPSA_MSG_SEND_RETRY_LIMIT) { 5660 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n", 5661 opcode, type); 5662 return -ETIMEDOUT; 5663 } 5664 5665 pci_free_consistent(pdev, cmd_sz, cmd, paddr64); 5666 5667 if (tag & HPSA_ERROR_BIT) { 5668 dev_err(&pdev->dev, "controller message %02x:%02x failed\n", 5669 opcode, type); 5670 return -EIO; 5671 } 5672 5673 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", 5674 opcode, type); 5675 return 0; 5676 } 5677 5678 #define hpsa_noop(p) hpsa_message(p, 3, 0) 5679 5680 static int hpsa_controller_hard_reset(struct pci_dev *pdev, 5681 void __iomem *vaddr, u32 use_doorbell) 5682 { 5683 5684 if (use_doorbell) { 5685 /* For everything after the P600, the PCI power state method 5686 * of resetting the controller doesn't work, so we have this 5687 * other way using the doorbell register. 5688 */ 5689 dev_info(&pdev->dev, "using doorbell to reset controller\n"); 5690 writel(use_doorbell, vaddr + SA5_DOORBELL); 5691 5692 /* PMC hardware guys tell us we need a 10 second delay after 5693 * doorbell reset and before any attempt to talk to the board 5694 * at all to ensure that this actually works and doesn't fall 5695 * over in some weird corner cases. 5696 */ 5697 msleep(10000); 5698 } else { /* Try to do it the PCI power state way */ 5699 5700 /* Quoting from the Open CISS Specification: "The Power 5701 * Management Control/Status Register (CSR) controls the power 5702 * state of the device. The normal operating state is D0, 5703 * CSR=00h. The software off state is D3, CSR=03h. To reset 5704 * the controller, place the interface device in D3 then to D0, 5705 * this causes a secondary PCI reset which will reset the 5706 * controller." */ 5707 5708 int rc = 0; 5709 5710 dev_info(&pdev->dev, "using PCI PM to reset controller\n"); 5711 5712 /* enter the D3hot power management state */ 5713 rc = pci_set_power_state(pdev, PCI_D3hot); 5714 if (rc) 5715 return rc; 5716 5717 msleep(500); 5718 5719 /* enter the D0 power management state */ 5720 rc = pci_set_power_state(pdev, PCI_D0); 5721 if (rc) 5722 return rc; 5723 5724 /* 5725 * The P600 requires a small delay when changing states. 
		 * Otherwise we may think the board did not reset and we bail.
		 * This is for kdump only and is particular to the P600.
		 */
		msleep(500);
	}
	return 0;
}

static void init_driver_version(char *driver_version, int len)
{
	memset(driver_version, 0, len);
	strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
}

static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
{
	char *driver_version;
	int i, size = sizeof(cfgtable->driver_version);

	driver_version = kmalloc(size, GFP_KERNEL);
	if (!driver_version)
		return -ENOMEM;

	init_driver_version(driver_version, size);
	for (i = 0; i < size; i++)
		writeb(driver_version[i], &cfgtable->driver_version[i]);
	kfree(driver_version);
	return 0;
}

static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
					  unsigned char *driver_ver)
{
	int i;

	for (i = 0; i < sizeof(cfgtable->driver_version); i++)
		driver_ver[i] = readb(&cfgtable->driver_version[i]);
}

static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
{

	char *driver_ver, *old_driver_ver;
	int rc, size = sizeof(cfgtable->driver_version);

	old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
	if (!old_driver_ver)
		return -ENOMEM;
	driver_ver = old_driver_ver + size;

	/* After a reset, the 32 bytes of "driver version" in the cfgtable
	 * should have been changed, otherwise we know the reset failed.
	 */
	init_driver_version(old_driver_ver, size);
	read_driver_ver_from_cfgtable(cfgtable, driver_ver);
	rc = !memcmp(driver_ver, old_driver_ver, size);
	kfree(old_driver_ver);
	return rc;
}
/* This does a hard reset of the controller using PCI power management
 * states or the doorbell register.
 */
static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	void __iomem *vaddr;
	unsigned long paddr;
	u32 misc_fw_support;
	int rc;
	struct CfgTable __iomem *cfgtable;
	u32 use_doorbell;
	u32 board_id;
	u16 command_register;

	/* For controllers as old as the P600, this is very nearly
	 * the same thing as
	 *
	 *	pci_save_state(pci_dev);
	 *	pci_set_power_state(pci_dev, PCI_D3hot);
	 *	pci_set_power_state(pci_dev, PCI_D0);
	 *	pci_restore_state(pci_dev);
	 *
	 * For controllers newer than the P600, the pci power state
	 * method of resetting doesn't work so we have another way
	 * using the doorbell register.
	 */

	rc = hpsa_lookup_board_id(pdev, &board_id);
	if (rc < 0) {
		dev_warn(&pdev->dev, "Board ID not found\n");
		return rc;
	}
	if (!ctlr_is_resettable(board_id)) {
		dev_warn(&pdev->dev, "Controller not resettable\n");
		return -ENODEV;
	}

	/* if controller is soft- but not hard resettable... */
	if (!ctlr_is_hard_resettable(board_id))
		return -ENOTSUPP; /* try soft reset later.
*/ 5828 5829 /* Save the PCI command register */ 5830 pci_read_config_word(pdev, 4, &command_register); 5831 pci_save_state(pdev); 5832 5833 /* find the first memory BAR, so we can find the cfg table */ 5834 rc = hpsa_pci_find_memory_BAR(pdev, &paddr); 5835 if (rc) 5836 return rc; 5837 vaddr = remap_pci_mem(paddr, 0x250); 5838 if (!vaddr) 5839 return -ENOMEM; 5840 5841 /* find cfgtable in order to check if reset via doorbell is supported */ 5842 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, 5843 &cfg_base_addr_index, &cfg_offset); 5844 if (rc) 5845 goto unmap_vaddr; 5846 cfgtable = remap_pci_mem(pci_resource_start(pdev, 5847 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable)); 5848 if (!cfgtable) { 5849 rc = -ENOMEM; 5850 goto unmap_vaddr; 5851 } 5852 rc = write_driver_ver_to_cfgtable(cfgtable); 5853 if (rc) 5854 goto unmap_cfgtable; 5855 5856 /* If reset via doorbell register is supported, use that. 5857 * There are two such methods. Favor the newest method. 5858 */ 5859 misc_fw_support = readl(&cfgtable->misc_fw_support); 5860 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2; 5861 if (use_doorbell) { 5862 use_doorbell = DOORBELL_CTLR_RESET2; 5863 } else { 5864 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; 5865 if (use_doorbell) { 5866 dev_warn(&pdev->dev, "Soft reset not supported. " 5867 "Firmware update is required.\n"); 5868 rc = -ENOTSUPP; /* try soft reset */ 5869 goto unmap_cfgtable; 5870 } 5871 } 5872 5873 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); 5874 if (rc) 5875 goto unmap_cfgtable; 5876 5877 pci_restore_state(pdev); 5878 pci_write_config_word(pdev, 4, command_register); 5879 5880 /* Some devices (notably the HP Smart Array 5i Controller) 5881 need a little pause here */ 5882 msleep(HPSA_POST_RESET_PAUSE_MSECS); 5883 5884 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); 5885 if (rc) { 5886 dev_warn(&pdev->dev, 5887 "failed waiting for board to become ready " 5888 "after hard reset\n"); 5889 goto unmap_cfgtable; 5890 } 5891 5892 rc = controller_reset_failed(vaddr); 5893 if (rc < 0) 5894 goto unmap_cfgtable; 5895 if (rc) { 5896 dev_warn(&pdev->dev, "Unable to successfully reset " 5897 "controller. Will try soft reset.\n"); 5898 rc = -ENOTSUPP; 5899 } else { 5900 dev_info(&pdev->dev, "board ready after hard reset.\n"); 5901 } 5902 5903 unmap_cfgtable: 5904 iounmap(cfgtable); 5905 5906 unmap_vaddr: 5907 iounmap(vaddr); 5908 return rc; 5909 } 5910 5911 /* 5912 * We cannot read the structure directly, for portability we must use 5913 * the io functions. 5914 * This is for debug only. 
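 * Note the pattern: every field of the __iomem config table is fetched
 * through readb()/readl() rather than dereferenced, e.g.
 *
 *	heartbeat = readl(&tb->HeartBeat);
 *
 * so the accesses remain correct on architectures where MMIO requires
 * special instructions.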
5915 */ 5916 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb) 5917 { 5918 #ifdef HPSA_DEBUG 5919 int i; 5920 char temp_name[17]; 5921 5922 dev_info(dev, "Controller Configuration information\n"); 5923 dev_info(dev, "------------------------------------\n"); 5924 for (i = 0; i < 4; i++) 5925 temp_name[i] = readb(&(tb->Signature[i])); 5926 temp_name[4] = '\0'; 5927 dev_info(dev, " Signature = %s\n", temp_name); 5928 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence))); 5929 dev_info(dev, " Transport methods supported = 0x%x\n", 5930 readl(&(tb->TransportSupport))); 5931 dev_info(dev, " Transport methods active = 0x%x\n", 5932 readl(&(tb->TransportActive))); 5933 dev_info(dev, " Requested transport Method = 0x%x\n", 5934 readl(&(tb->HostWrite.TransportRequest))); 5935 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n", 5936 readl(&(tb->HostWrite.CoalIntDelay))); 5937 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n", 5938 readl(&(tb->HostWrite.CoalIntCount))); 5939 dev_info(dev, " Max outstanding commands = %d\n", 5940 readl(&(tb->CmdsOutMax))); 5941 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); 5942 for (i = 0; i < 16; i++) 5943 temp_name[i] = readb(&(tb->ServerName[i])); 5944 temp_name[16] = '\0'; 5945 dev_info(dev, " Server Name = %s\n", temp_name); 5946 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n", 5947 readl(&(tb->HeartBeat))); 5948 #endif /* HPSA_DEBUG */ 5949 } 5950 5951 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) 5952 { 5953 int i, offset, mem_type, bar_type; 5954 5955 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */ 5956 return 0; 5957 offset = 0; 5958 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 5959 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE; 5960 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO) 5961 offset += 4; 5962 else { 5963 mem_type = pci_resource_flags(pdev, i) & 5964 PCI_BASE_ADDRESS_MEM_TYPE_MASK; 5965 switch (mem_type) { 5966 case PCI_BASE_ADDRESS_MEM_TYPE_32: 5967 case PCI_BASE_ADDRESS_MEM_TYPE_1M: 5968 offset += 4; /* 32 bit */ 5969 break; 5970 case PCI_BASE_ADDRESS_MEM_TYPE_64: 5971 offset += 8; 5972 break; 5973 default: /* reserved in PCI 2.2 */ 5974 dev_warn(&pdev->dev, 5975 "base address is invalid\n"); 5976 return -1; 5977 break; 5978 } 5979 } 5980 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) 5981 return i + 1; 5982 } 5983 return -1; 5984 } 5985 5986 /* If MSI/MSI-X is supported by the kernel we will try to enable it on 5987 * controllers that are capable. If not, we use IO-APIC mode. 
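 * The fallback order implemented below: MSI-X (one vector per reply
 * queue, capped at num_online_cpus()), then single-message MSI, then
 * legacy INTx via h->pdev->irq.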
5988 */ 5989 5990 static void hpsa_interrupt_mode(struct ctlr_info *h) 5991 { 5992 #ifdef CONFIG_PCI_MSI 5993 int err, i; 5994 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES]; 5995 5996 for (i = 0; i < MAX_REPLY_QUEUES; i++) { 5997 hpsa_msix_entries[i].vector = 0; 5998 hpsa_msix_entries[i].entry = i; 5999 } 6000 6001 /* Some boards advertise MSI but don't really support it */ 6002 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || 6003 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) 6004 goto default_int_mode; 6005 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { 6006 dev_info(&h->pdev->dev, "MSIX\n"); 6007 h->msix_vector = MAX_REPLY_QUEUES; 6008 if (h->msix_vector > num_online_cpus()) 6009 h->msix_vector = num_online_cpus(); 6010 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries, 6011 1, h->msix_vector); 6012 if (err < 0) { 6013 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err); 6014 h->msix_vector = 0; 6015 goto single_msi_mode; 6016 } else if (err < h->msix_vector) { 6017 dev_warn(&h->pdev->dev, "only %d MSI-X vectors " 6018 "available\n", err); 6019 } 6020 h->msix_vector = err; 6021 for (i = 0; i < h->msix_vector; i++) 6022 h->intr[i] = hpsa_msix_entries[i].vector; 6023 return; 6024 } 6025 single_msi_mode: 6026 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { 6027 dev_info(&h->pdev->dev, "MSI\n"); 6028 if (!pci_enable_msi(h->pdev)) 6029 h->msi_vector = 1; 6030 else 6031 dev_warn(&h->pdev->dev, "MSI init failed\n"); 6032 } 6033 default_int_mode: 6034 #endif /* CONFIG_PCI_MSI */ 6035 /* if we get here we're going to use the default interrupt mode */ 6036 h->intr[h->intr_mode] = h->pdev->irq; 6037 } 6038 6039 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) 6040 { 6041 int i; 6042 u32 subsystem_vendor_id, subsystem_device_id; 6043 6044 subsystem_vendor_id = pdev->subsystem_vendor; 6045 subsystem_device_id = pdev->subsystem_device; 6046 *board_id = ((subsystem_device_id << 16) & 0xffff0000) | 6047 subsystem_vendor_id; 6048 6049 for (i = 0; i < ARRAY_SIZE(products); i++) 6050 if (*board_id == products[i].board_id) 6051 return i; 6052 6053 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP && 6054 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) || 6055 !hpsa_allow_any) { 6056 dev_warn(&pdev->dev, "unrecognized board ID: " 6057 "0x%08x, ignoring.\n", *board_id); 6058 return -ENODEV; 6059 } 6060 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */ 6061 } 6062 6063 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev, 6064 unsigned long *memory_bar) 6065 { 6066 int i; 6067 6068 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) 6069 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 6070 /* addressing mode bits already removed */ 6071 *memory_bar = pci_resource_start(pdev, i); 6072 dev_dbg(&pdev->dev, "memory BAR = %lx\n", 6073 *memory_bar); 6074 return 0; 6075 } 6076 dev_warn(&pdev->dev, "no memory BAR found\n"); 6077 return -ENODEV; 6078 } 6079 6080 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, 6081 int wait_for_ready) 6082 { 6083 int i, iterations; 6084 u32 scratchpad; 6085 if (wait_for_ready) 6086 iterations = HPSA_BOARD_READY_ITERATIONS; 6087 else 6088 iterations = HPSA_BOARD_NOT_READY_ITERATIONS; 6089 6090 for (i = 0; i < iterations; i++) { 6091 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); 6092 if (wait_for_ready) { 6093 if (scratchpad == HPSA_FIRMWARE_READY) 6094 return 0; 6095 } else { 6096 if (scratchpad != HPSA_FIRMWARE_READY) 6097 return 0; 6098 } 6099 
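		/* desired state not reached yet; sleep and re-read the scratchpad */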
		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	dev_warn(&pdev->dev, "board not ready, timed out.\n");
	return -ENODEV;
}

static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset)
{
	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
	*cfg_base_addr &= (u32) 0x0000ffff;
	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
	if (*cfg_base_addr_index == -1) {
		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
		return -ENODEV;
	}
	return 0;
}

static int hpsa_find_cfgtables(struct ctlr_info *h)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	u32 trans_offset;
	int rc;

	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
		&cfg_base_addr_index, &cfg_offset);
	if (rc)
		return rc;
	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
		cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
	if (!h->cfgtable)
		return -ENOMEM;
	rc = write_driver_ver_to_cfgtable(h->cfgtable);
	if (rc)
		return rc;
	/* Find performant mode table. */
	trans_offset = readl(&h->cfgtable->TransMethodOffset);
	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
		cfg_base_addr_index) + cfg_offset + trans_offset,
		sizeof(*h->transtable));
	if (!h->transtable)
		return -ENOMEM;
	return 0;
}

static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));

	/* Limit commands in memory limited kdump scenario. */
	if (reset_devices && h->max_commands > 32)
		h->max_commands = 32;

	if (h->max_commands < 16) {
		dev_warn(&h->pdev->dev, "Controller reports "
			"max supported commands of %d, an obvious lie. "
			"Using 16.  Ensure that firmware is up to date.\n",
			h->max_commands);
		h->max_commands = 16;
	}
}

/* If the controller reports that the total max sg entries is greater than 512,
 * then we know that chained SG blocks work.  (Original smart arrays did not
 * support chained SG blocks and would return zero for max sg entries.)
 */
static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
{
	return h->maxsgentries > 512;
}

/* Interrogate the hardware for some limits:
 * max commands, max SG elements without chaining, and with chaining,
 * SG chain block size, etc.
 */
static void hpsa_find_board_params(struct ctlr_info *h)
{
	hpsa_get_max_perf_mode_cmds(h);
	h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
	h->fw_support = readl(&(h->cfgtable->misc_fw_support));
	if (hpsa_supports_chained_sg_blocks(h)) {
		/* Limit in-command s/g elements to 32 to save dma'able memory.
*/ 6187 h->max_cmd_sg_entries = 32; 6188 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries; 6189 h->maxsgentries--; /* save one for chain pointer */ 6190 } else { 6191 /* 6192 * Original smart arrays supported at most 31 s/g entries 6193 * embedded inline in the command (trying to use more 6194 * would lock up the controller) 6195 */ 6196 h->max_cmd_sg_entries = 31; 6197 h->maxsgentries = 31; /* default to traditional values */ 6198 h->chainsize = 0; 6199 } 6200 6201 /* Find out what task management functions are supported and cache */ 6202 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags)); 6203 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags)) 6204 dev_warn(&h->pdev->dev, "Physical aborts not supported\n"); 6205 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) 6206 dev_warn(&h->pdev->dev, "Logical aborts not supported\n"); 6207 } 6208 6209 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) 6210 { 6211 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) { 6212 dev_warn(&h->pdev->dev, "not a valid CISS config table\n"); 6213 return false; 6214 } 6215 return true; 6216 } 6217 6218 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h) 6219 { 6220 u32 driver_support; 6221 6222 driver_support = readl(&(h->cfgtable->driver_support)); 6223 /* Need to enable prefetch in the SCSI core for 6400 in x86 */ 6224 #ifdef CONFIG_X86 6225 driver_support |= ENABLE_SCSI_PREFETCH; 6226 #endif 6227 driver_support |= ENABLE_UNIT_ATTN; 6228 writel(driver_support, &(h->cfgtable->driver_support)); 6229 } 6230 6231 /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result 6232 * in a prefetch beyond physical memory. 6233 */ 6234 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h) 6235 { 6236 u32 dma_prefetch; 6237 6238 if (h->board_id != 0x3225103C) 6239 return; 6240 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); 6241 dma_prefetch |= 0x8000; 6242 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); 6243 } 6244 6245 static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h) 6246 { 6247 int i; 6248 u32 doorbell_value; 6249 unsigned long flags; 6250 /* wait until the clear_event_notify bit 6 is cleared by controller. */ 6251 for (i = 0; i < MAX_CONFIG_WAIT; i++) { 6252 spin_lock_irqsave(&h->lock, flags); 6253 doorbell_value = readl(h->vaddr + SA5_DOORBELL); 6254 spin_unlock_irqrestore(&h->lock, flags); 6255 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS)) 6256 break; 6257 /* delay and try again */ 6258 msleep(20); 6259 } 6260 } 6261 6262 static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h) 6263 { 6264 int i; 6265 u32 doorbell_value; 6266 unsigned long flags; 6267 6268 /* under certain very rare conditions, this can take awhile. 6269 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right 6270 * as we enter this code.) 
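	 * Note that MAX_CONFIG_WAIT bounds the number of passes through
	 * this loop, each pass sleeping 10-20 ms, so the total wait can be
	 * far longer than MAX_CONFIG_WAIT milliseconds.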
6271 */ 6272 for (i = 0; i < MAX_CONFIG_WAIT; i++) { 6273 spin_lock_irqsave(&h->lock, flags); 6274 doorbell_value = readl(h->vaddr + SA5_DOORBELL); 6275 spin_unlock_irqrestore(&h->lock, flags); 6276 if (!(doorbell_value & CFGTBL_ChangeReq)) 6277 break; 6278 /* delay and try again */ 6279 usleep_range(10000, 20000); 6280 } 6281 } 6282 6283 static int hpsa_enter_simple_mode(struct ctlr_info *h) 6284 { 6285 u32 trans_support; 6286 6287 trans_support = readl(&(h->cfgtable->TransportSupport)); 6288 if (!(trans_support & SIMPLE_MODE)) 6289 return -ENOTSUPP; 6290 6291 h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); 6292 6293 /* Update the field, and then ring the doorbell */ 6294 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); 6295 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); 6296 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 6297 hpsa_wait_for_mode_change_ack(h); 6298 print_cfg_table(&h->pdev->dev, h->cfgtable); 6299 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) 6300 goto error; 6301 h->transMethod = CFGTBL_Trans_Simple; 6302 return 0; 6303 error: 6304 dev_warn(&h->pdev->dev, "unable to get board into simple mode\n"); 6305 return -ENODEV; 6306 } 6307 6308 static int hpsa_pci_init(struct ctlr_info *h) 6309 { 6310 int prod_index, err; 6311 6312 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); 6313 if (prod_index < 0) 6314 return prod_index; 6315 h->product_name = products[prod_index].product_name; 6316 h->access = *(products[prod_index].access); 6317 6318 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | 6319 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); 6320 6321 err = pci_enable_device(h->pdev); 6322 if (err) { 6323 dev_warn(&h->pdev->dev, "unable to enable PCI device\n"); 6324 return err; 6325 } 6326 6327 err = pci_request_regions(h->pdev, HPSA); 6328 if (err) { 6329 dev_err(&h->pdev->dev, 6330 "cannot obtain PCI resources, aborting\n"); 6331 return err; 6332 } 6333 6334 pci_set_master(h->pdev); 6335 6336 hpsa_interrupt_mode(h); 6337 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); 6338 if (err) 6339 goto err_out_free_res; 6340 h->vaddr = remap_pci_mem(h->paddr, 0x250); 6341 if (!h->vaddr) { 6342 err = -ENOMEM; 6343 goto err_out_free_res; 6344 } 6345 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); 6346 if (err) 6347 goto err_out_free_res; 6348 err = hpsa_find_cfgtables(h); 6349 if (err) 6350 goto err_out_free_res; 6351 hpsa_find_board_params(h); 6352 6353 if (!hpsa_CISS_signature_present(h)) { 6354 err = -ENODEV; 6355 goto err_out_free_res; 6356 } 6357 hpsa_set_driver_support_bits(h); 6358 hpsa_p600_dma_prefetch_quirk(h); 6359 err = hpsa_enter_simple_mode(h); 6360 if (err) 6361 goto err_out_free_res; 6362 return 0; 6363 6364 err_out_free_res: 6365 if (h->transtable) 6366 iounmap(h->transtable); 6367 if (h->cfgtable) 6368 iounmap(h->cfgtable); 6369 if (h->vaddr) 6370 iounmap(h->vaddr); 6371 pci_disable_device(h->pdev); 6372 pci_release_regions(h->pdev); 6373 return err; 6374 } 6375 6376 static void hpsa_hba_inquiry(struct ctlr_info *h) 6377 { 6378 int rc; 6379 6380 #define HBA_INQUIRY_BYTE_COUNT 64 6381 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL); 6382 if (!h->hba_inquiry_data) 6383 return; 6384 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0, 6385 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT); 6386 if (rc != 0) { 6387 kfree(h->hba_inquiry_data); 6388 h->hba_inquiry_data = NULL; 6389 } 6390 } 6391 6392 static int hpsa_init_reset_devices(struct pci_dev *pdev) 6393 { 6394 int 
rc, i; 6395 void __iomem *vaddr; 6396 6397 if (!reset_devices) 6398 return 0; 6399 6400 /* kdump kernel is loading, we don't know in which state is 6401 * the pci interface. The dev->enable_cnt is equal zero 6402 * so we call enable+disable, wait a while and switch it on. 6403 */ 6404 rc = pci_enable_device(pdev); 6405 if (rc) { 6406 dev_warn(&pdev->dev, "Failed to enable PCI device\n"); 6407 return -ENODEV; 6408 } 6409 pci_disable_device(pdev); 6410 msleep(260); /* a randomly chosen number */ 6411 rc = pci_enable_device(pdev); 6412 if (rc) { 6413 dev_warn(&pdev->dev, "failed to enable device.\n"); 6414 return -ENODEV; 6415 } 6416 6417 pci_set_master(pdev); 6418 6419 vaddr = pci_ioremap_bar(pdev, 0); 6420 if (vaddr == NULL) { 6421 rc = -ENOMEM; 6422 goto out_disable; 6423 } 6424 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET); 6425 iounmap(vaddr); 6426 6427 /* Reset the controller with a PCI power-cycle or via doorbell */ 6428 rc = hpsa_kdump_hard_reset_controller(pdev); 6429 6430 /* -ENOTSUPP here means we cannot reset the controller 6431 * but it's already (and still) up and running in 6432 * "performant mode". Or, it might be 640x, which can't reset 6433 * due to concerns about shared bbwc between 6402/6404 pair. 6434 */ 6435 if (rc) 6436 goto out_disable; 6437 6438 /* Now try to get the controller to respond to a no-op */ 6439 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n"); 6440 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { 6441 if (hpsa_noop(pdev) == 0) 6442 break; 6443 else 6444 dev_warn(&pdev->dev, "no-op failed%s\n", 6445 (i < 11 ? "; re-trying" : "")); 6446 } 6447 6448 out_disable: 6449 6450 pci_disable_device(pdev); 6451 return rc; 6452 } 6453 6454 static int hpsa_allocate_cmd_pool(struct ctlr_info *h) 6455 { 6456 h->cmd_pool_bits = kzalloc( 6457 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) * 6458 sizeof(unsigned long), GFP_KERNEL); 6459 h->cmd_pool = pci_alloc_consistent(h->pdev, 6460 h->nr_cmds * sizeof(*h->cmd_pool), 6461 &(h->cmd_pool_dhandle)); 6462 h->errinfo_pool = pci_alloc_consistent(h->pdev, 6463 h->nr_cmds * sizeof(*h->errinfo_pool), 6464 &(h->errinfo_pool_dhandle)); 6465 if ((h->cmd_pool_bits == NULL) 6466 || (h->cmd_pool == NULL) 6467 || (h->errinfo_pool == NULL)) { 6468 dev_err(&h->pdev->dev, "out of memory in %s", __func__); 6469 return -ENOMEM; 6470 } 6471 return 0; 6472 } 6473 6474 static void hpsa_free_cmd_pool(struct ctlr_info *h) 6475 { 6476 kfree(h->cmd_pool_bits); 6477 if (h->cmd_pool) 6478 pci_free_consistent(h->pdev, 6479 h->nr_cmds * sizeof(struct CommandList), 6480 h->cmd_pool, h->cmd_pool_dhandle); 6481 if (h->ioaccel2_cmd_pool) 6482 pci_free_consistent(h->pdev, 6483 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), 6484 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle); 6485 if (h->errinfo_pool) 6486 pci_free_consistent(h->pdev, 6487 h->nr_cmds * sizeof(struct ErrorInfo), 6488 h->errinfo_pool, 6489 h->errinfo_pool_dhandle); 6490 if (h->ioaccel_cmd_pool) 6491 pci_free_consistent(h->pdev, 6492 h->nr_cmds * sizeof(struct io_accel1_cmd), 6493 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); 6494 } 6495 6496 static void hpsa_irq_affinity_hints(struct ctlr_info *h) 6497 { 6498 int i, cpu; 6499 6500 cpu = cpumask_first(cpu_online_mask); 6501 for (i = 0; i < h->msix_vector; i++) { 6502 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu)); 6503 cpu = cpumask_next(cpu, cpu_online_mask); 6504 } 6505 } 6506 6507 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */ 6508 static void hpsa_free_irqs(struct ctlr_info *h) 
6509 { 6510 int i; 6511 6512 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) { 6513 /* Single reply queue, only one irq to free */ 6514 i = h->intr_mode; 6515 irq_set_affinity_hint(h->intr[i], NULL); 6516 free_irq(h->intr[i], &h->q[i]); 6517 return; 6518 } 6519 6520 for (i = 0; i < h->msix_vector; i++) { 6521 irq_set_affinity_hint(h->intr[i], NULL); 6522 free_irq(h->intr[i], &h->q[i]); 6523 } 6524 for (; i < MAX_REPLY_QUEUES; i++) 6525 h->q[i] = 0; 6526 } 6527 6528 static int hpsa_request_irq(struct ctlr_info *h, 6529 irqreturn_t (*msixhandler)(int, void *), 6530 irqreturn_t (*intxhandler)(int, void *)) 6531 { 6532 int rc, i; 6533 6534 /* 6535 * initialize h->q[x] = x so that interrupt handlers know which 6536 * queue to process. 6537 */ 6538 for (i = 0; i < MAX_REPLY_QUEUES; i++) 6539 h->q[i] = (u8) i; 6540 6541 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) { 6542 /* If performant mode and MSI-X, use multiple reply queues */ 6543 for (i = 0; i < h->msix_vector; i++) { 6544 rc = request_irq(h->intr[i], msixhandler, 6545 0, h->devname, 6546 &h->q[i]); 6547 if (rc) { 6548 int j; 6549 6550 dev_err(&h->pdev->dev, 6551 "failed to get irq %d for %s\n", 6552 h->intr[i], h->devname); 6553 for (j = 0; j < i; j++) { 6554 free_irq(h->intr[j], &h->q[j]); 6555 h->q[j] = 0; 6556 } 6557 for (; j < MAX_REPLY_QUEUES; j++) 6558 h->q[j] = 0; 6559 return rc; 6560 } 6561 } 6562 hpsa_irq_affinity_hints(h); 6563 } else { 6564 /* Use single reply pool */ 6565 if (h->msix_vector > 0 || h->msi_vector) { 6566 rc = request_irq(h->intr[h->intr_mode], 6567 msixhandler, 0, h->devname, 6568 &h->q[h->intr_mode]); 6569 } else { 6570 rc = request_irq(h->intr[h->intr_mode], 6571 intxhandler, IRQF_SHARED, h->devname, 6572 &h->q[h->intr_mode]); 6573 } 6574 } 6575 if (rc) { 6576 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n", 6577 h->intr[h->intr_mode], h->devname); 6578 return -ENODEV; 6579 } 6580 return 0; 6581 } 6582 6583 static int hpsa_kdump_soft_reset(struct ctlr_info *h) 6584 { 6585 if (hpsa_send_host_reset(h, RAID_CTLR_LUNID, 6586 HPSA_RESET_TYPE_CONTROLLER)) { 6587 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n"); 6588 return -EIO; 6589 } 6590 6591 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); 6592 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) { 6593 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); 6594 return -1; 6595 } 6596 6597 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); 6598 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) { 6599 dev_warn(&h->pdev->dev, "Board failed to become ready " 6600 "after soft reset.\n"); 6601 return -1; 6602 } 6603 6604 return 0; 6605 } 6606 6607 static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h) 6608 { 6609 hpsa_free_irqs(h); 6610 #ifdef CONFIG_PCI_MSI 6611 if (h->msix_vector) { 6612 if (h->pdev->msix_enabled) 6613 pci_disable_msix(h->pdev); 6614 } else if (h->msi_vector) { 6615 if (h->pdev->msi_enabled) 6616 pci_disable_msi(h->pdev); 6617 } 6618 #endif /* CONFIG_PCI_MSI */ 6619 } 6620 6621 static void hpsa_free_reply_queues(struct ctlr_info *h) 6622 { 6623 int i; 6624 6625 for (i = 0; i < h->nreply_queues; i++) { 6626 if (!h->reply_queue[i].head) 6627 continue; 6628 pci_free_consistent(h->pdev, h->reply_queue_size, 6629 h->reply_queue[i].head, h->reply_queue[i].busaddr); 6630 h->reply_queue[i].head = NULL; 6631 h->reply_queue[i].busaddr = 0; 6632 } 6633 } 6634 6635 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) 6636 { 6637 
hpsa_free_irqs_and_disable_msix(h); 6638 hpsa_free_sg_chain_blocks(h); 6639 hpsa_free_cmd_pool(h); 6640 kfree(h->ioaccel1_blockFetchTable); 6641 kfree(h->blockFetchTable); 6642 hpsa_free_reply_queues(h); 6643 if (h->vaddr) 6644 iounmap(h->vaddr); 6645 if (h->transtable) 6646 iounmap(h->transtable); 6647 if (h->cfgtable) 6648 iounmap(h->cfgtable); 6649 pci_disable_device(h->pdev); 6650 pci_release_regions(h->pdev); 6651 kfree(h); 6652 } 6653 6654 /* Called when controller lockup detected. */ 6655 static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list) 6656 { 6657 struct CommandList *c = NULL; 6658 6659 assert_spin_locked(&h->lock); 6660 /* Mark all outstanding commands as failed and complete them. */ 6661 while (!list_empty(list)) { 6662 c = list_entry(list->next, struct CommandList, list); 6663 c->err_info->CommandStatus = CMD_HARDWARE_ERR; 6664 finish_cmd(c); 6665 } 6666 } 6667 6668 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value) 6669 { 6670 int i, cpu; 6671 6672 cpu = cpumask_first(cpu_online_mask); 6673 for (i = 0; i < num_online_cpus(); i++) { 6674 u32 *lockup_detected; 6675 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); 6676 *lockup_detected = value; 6677 cpu = cpumask_next(cpu, cpu_online_mask); 6678 } 6679 wmb(); /* be sure the per-cpu variables are out to memory */ 6680 } 6681 6682 static void controller_lockup_detected(struct ctlr_info *h) 6683 { 6684 unsigned long flags; 6685 u32 lockup_detected; 6686 6687 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6688 spin_lock_irqsave(&h->lock, flags); 6689 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); 6690 if (!lockup_detected) { 6691 /* no heartbeat, but controller gave us a zero. */ 6692 dev_warn(&h->pdev->dev, 6693 "lockup detected but scratchpad register is zero\n"); 6694 lockup_detected = 0xffffffff; 6695 } 6696 set_lockup_detected_for_all_cpus(h, lockup_detected); 6697 spin_unlock_irqrestore(&h->lock, flags); 6698 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n", 6699 lockup_detected); 6700 pci_disable_device(h->pdev); 6701 spin_lock_irqsave(&h->lock, flags); 6702 fail_all_cmds_on_list(h, &h->cmpQ); 6703 fail_all_cmds_on_list(h, &h->reqQ); 6704 spin_unlock_irqrestore(&h->lock, flags); 6705 } 6706 6707 static void detect_controller_lockup(struct ctlr_info *h) 6708 { 6709 u64 now; 6710 u32 heartbeat; 6711 unsigned long flags; 6712 6713 now = get_jiffies_64(); 6714 /* If we've received an interrupt recently, we're ok. */ 6715 if (time_after64(h->last_intr_timestamp + 6716 (h->heartbeat_sample_interval), now)) 6717 return; 6718 6719 /* 6720 * If we've already checked the heartbeat recently, we're ok. 6721 * This could happen if someone sends us a signal. We 6722 * otherwise don't care about signals in this thread. 6723 */ 6724 if (time_after64(h->last_heartbeat_timestamp + 6725 (h->heartbeat_sample_interval), now)) 6726 return; 6727 6728 /* If heartbeat has not changed since we last looked, we're not ok. */ 6729 spin_lock_irqsave(&h->lock, flags); 6730 heartbeat = readl(&h->cfgtable->HeartBeat); 6731 spin_unlock_irqrestore(&h->lock, flags); 6732 if (h->last_heartbeat == heartbeat) { 6733 controller_lockup_detected(h); 6734 return; 6735 } 6736 6737 /* We're ok. 
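	 * The heartbeat advanced since the last sample; record the new
	 * value and timestamp so the next pass compares against this
	 * reading.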
*/ 6738 h->last_heartbeat = heartbeat; 6739 h->last_heartbeat_timestamp = now; 6740 } 6741 6742 static void hpsa_ack_ctlr_events(struct ctlr_info *h) 6743 { 6744 int i; 6745 char *event_type; 6746 6747 /* Clear the driver-requested rescan flag */ 6748 h->drv_req_rescan = 0; 6749 6750 /* Ask the controller to clear the events we're handling. */ 6751 if ((h->transMethod & (CFGTBL_Trans_io_accel1 6752 | CFGTBL_Trans_io_accel2)) && 6753 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE || 6754 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) { 6755 6756 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE) 6757 event_type = "state change"; 6758 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE) 6759 event_type = "configuration change"; 6760 /* Stop sending new RAID offload reqs via the IO accelerator */ 6761 scsi_block_requests(h->scsi_host); 6762 for (i = 0; i < h->ndevices; i++) 6763 h->dev[i]->offload_enabled = 0; 6764 hpsa_drain_accel_commands(h); 6765 /* Set 'accelerator path config change' bit */ 6766 dev_warn(&h->pdev->dev, 6767 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n", 6768 h->events, event_type); 6769 writel(h->events, &(h->cfgtable->clear_event_notify)); 6770 /* Set the "clear event notify field update" bit 6 */ 6771 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); 6772 /* Wait until ctlr clears 'clear event notify field', bit 6 */ 6773 hpsa_wait_for_clear_event_notify_ack(h); 6774 scsi_unblock_requests(h->scsi_host); 6775 } else { 6776 /* Acknowledge controller notification events. */ 6777 writel(h->events, &(h->cfgtable->clear_event_notify)); 6778 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); 6779 hpsa_wait_for_clear_event_notify_ack(h); 6780 #if 0 6781 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 6782 hpsa_wait_for_mode_change_ack(h); 6783 #endif 6784 } 6785 return; 6786 } 6787 6788 /* Check a register on the controller to see if there are configuration 6789 * changes (added/changed/removed logical drives, etc.) which mean that 6790 * we should rescan the controller for devices. 6791 * Also check flag for driver-initiated rescan. 
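 * Controller-initiated events are only reported when the firmware
 * advertises MISC_FW_EVENT_NOTIFY; the event word read here is cached
 * in h->events so hpsa_ack_ctlr_events() can acknowledge exactly what
 * was seen.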
6792 */ 6793 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h) 6794 { 6795 if (h->drv_req_rescan) 6796 return 1; 6797 6798 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) 6799 return 0; 6800 6801 h->events = readl(&(h->cfgtable->event_notify)); 6802 return h->events & RESCAN_REQUIRED_EVENT_BITS; 6803 } 6804 6805 /* 6806 * Check if any of the offline devices have become ready 6807 */ 6808 static int hpsa_offline_devices_ready(struct ctlr_info *h) 6809 { 6810 unsigned long flags; 6811 struct offline_device_entry *d; 6812 struct list_head *this, *tmp; 6813 6814 spin_lock_irqsave(&h->offline_device_lock, flags); 6815 list_for_each_safe(this, tmp, &h->offline_device_list) { 6816 d = list_entry(this, struct offline_device_entry, 6817 offline_list); 6818 spin_unlock_irqrestore(&h->offline_device_lock, flags); 6819 if (!hpsa_volume_offline(h, d->scsi3addr)) { 6820 spin_lock_irqsave(&h->offline_device_lock, flags); 6821 list_del(&d->offline_list); 6822 spin_unlock_irqrestore(&h->offline_device_lock, flags); 6823 return 1; 6824 } 6825 spin_lock_irqsave(&h->offline_device_lock, flags); 6826 } 6827 spin_unlock_irqrestore(&h->offline_device_lock, flags); 6828 return 0; 6829 } 6830 6831 6832 static void hpsa_monitor_ctlr_worker(struct work_struct *work) 6833 { 6834 unsigned long flags; 6835 struct ctlr_info *h = container_of(to_delayed_work(work), 6836 struct ctlr_info, monitor_ctlr_work); 6837 detect_controller_lockup(h); 6838 if (lockup_detected(h)) 6839 return; 6840 6841 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { 6842 scsi_host_get(h->scsi_host); 6843 h->drv_req_rescan = 0; 6844 hpsa_ack_ctlr_events(h); 6845 hpsa_scan_start(h->scsi_host); 6846 scsi_host_put(h->scsi_host); 6847 } 6848 6849 spin_lock_irqsave(&h->lock, flags); 6850 if (h->remove_in_progress) { 6851 spin_unlock_irqrestore(&h->lock, flags); 6852 return; 6853 } 6854 schedule_delayed_work(&h->monitor_ctlr_work, 6855 h->heartbeat_sample_interval); 6856 spin_unlock_irqrestore(&h->lock, flags); 6857 } 6858 6859 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 6860 { 6861 int dac, rc; 6862 struct ctlr_info *h; 6863 int try_soft_reset = 0; 6864 unsigned long flags; 6865 6866 if (number_of_controllers == 0) 6867 printk(KERN_INFO DRIVER_NAME "\n"); 6868 6869 rc = hpsa_init_reset_devices(pdev); 6870 if (rc) { 6871 if (rc != -ENOTSUPP) 6872 return rc; 6873 /* If the reset fails in a particular way (it has no way to do 6874 * a proper hard reset, so returns -ENOTSUPP) we can try to do 6875 * a soft reset once we get the controller configured up to the 6876 * point that it can accept a command. 6877 */ 6878 try_soft_reset = 1; 6879 rc = 0; 6880 } 6881 6882 reinit_after_soft_reset: 6883 6884 /* Command structures must be aligned on a 32-byte boundary because 6885 * the 5 lower bits of the address are used by the hardware. and by 6886 * the driver. See comments in hpsa.h for more info. 6887 */ 6888 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); 6889 h = kzalloc(sizeof(*h), GFP_KERNEL); 6890 if (!h) 6891 return -ENOMEM; 6892 6893 h->pdev = pdev; 6894 h->intr_mode = hpsa_simple_mode ? 
SIMPLE_MODE_INT : PERF_MODE_INT; 6895 INIT_LIST_HEAD(&h->cmpQ); 6896 INIT_LIST_HEAD(&h->reqQ); 6897 INIT_LIST_HEAD(&h->offline_device_list); 6898 spin_lock_init(&h->lock); 6899 spin_lock_init(&h->offline_device_lock); 6900 spin_lock_init(&h->scan_lock); 6901 spin_lock_init(&h->passthru_count_lock); 6902 6903 /* Allocate and clear per-cpu variable lockup_detected */ 6904 h->lockup_detected = alloc_percpu(u32); 6905 if (!h->lockup_detected) { 6906 rc = -ENOMEM; 6907 goto clean1; 6908 } 6909 set_lockup_detected_for_all_cpus(h, 0); 6910 6911 rc = hpsa_pci_init(h); 6912 if (rc != 0) 6913 goto clean1; 6914 6915 sprintf(h->devname, HPSA "%d", number_of_controllers); 6916 h->ctlr = number_of_controllers; 6917 number_of_controllers++; 6918 6919 /* configure PCI DMA stuff */ 6920 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 6921 if (rc == 0) { 6922 dac = 1; 6923 } else { 6924 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 6925 if (rc == 0) { 6926 dac = 0; 6927 } else { 6928 dev_err(&pdev->dev, "no suitable DMA available\n"); 6929 goto clean1; 6930 } 6931 } 6932 6933 /* make sure the board interrupts are off */ 6934 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6935 6936 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx)) 6937 goto clean2; 6938 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", 6939 h->devname, pdev->device, 6940 h->intr[h->intr_mode], dac ? "" : " not"); 6941 if (hpsa_allocate_cmd_pool(h)) 6942 goto clean4; 6943 if (hpsa_allocate_sg_chain_blocks(h)) 6944 goto clean4; 6945 init_waitqueue_head(&h->scan_wait_queue); 6946 h->scan_finished = 1; /* no scan currently in progress */ 6947 6948 pci_set_drvdata(pdev, h); 6949 h->ndevices = 0; 6950 h->hba_mode_enabled = 0; 6951 h->scsi_host = NULL; 6952 spin_lock_init(&h->devlock); 6953 hpsa_put_ctlr_into_performant_mode(h); 6954 6955 /* At this point, the controller is ready to take commands. 6956 * Now, if reset_devices and the hard reset didn't work, try 6957 * the soft reset and see if that works. 6958 */ 6959 if (try_soft_reset) { 6960 6961 /* This is kind of gross. We may or may not get a completion 6962 * from the soft reset command, and if we do, then the value 6963 * from the fifo may or may not be valid. So, we wait 10 secs 6964 * after the reset throwing away any completions we get during 6965 * that time. Unregister the interrupt handler and register 6966 * fake ones to scoop up any residual completions. 6967 */ 6968 spin_lock_irqsave(&h->lock, flags); 6969 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6970 spin_unlock_irqrestore(&h->lock, flags); 6971 hpsa_free_irqs(h); 6972 rc = hpsa_request_irq(h, hpsa_msix_discard_completions, 6973 hpsa_intx_discard_completions); 6974 if (rc) { 6975 dev_warn(&h->pdev->dev, "Failed to request_irq after " 6976 "soft reset.\n"); 6977 goto clean4; 6978 } 6979 6980 rc = hpsa_kdump_soft_reset(h); 6981 if (rc) 6982 /* Neither hard nor soft reset worked, we're hosed. */ 6983 goto clean4; 6984 6985 dev_info(&h->pdev->dev, "Board READY.\n"); 6986 dev_info(&h->pdev->dev, 6987 "Waiting for stale completions to drain.\n"); 6988 h->access.set_intr_mask(h, HPSA_INTR_ON); 6989 msleep(10000); 6990 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6991 6992 rc = controller_reset_failed(h->cfgtable); 6993 if (rc) 6994 dev_info(&h->pdev->dev, 6995 "Soft reset appears to have failed.\n"); 6996 6997 /* since the controller's reset, we have to go back and re-init 6998 * everything. Easiest to just forget what we've done and do it 6999 * all over again. 
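		 * (Hence the goto back to reinit_after_soft_reset below,
		 * with try_soft_reset cleared first so at most one such
		 * re-init pass is made.)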
7000 */ 7001 hpsa_undo_allocations_after_kdump_soft_reset(h); 7002 try_soft_reset = 0; 7003 if (rc) 7004 /* don't go to clean4, we already unallocated */ 7005 return -ENODEV; 7006 7007 goto reinit_after_soft_reset; 7008 } 7009 7010 /* Enable Accelerated IO path at driver layer */ 7011 h->acciopath_status = 1; 7012 7013 h->drv_req_rescan = 0; 7014 7015 /* Turn the interrupts on so we can service requests */ 7016 h->access.set_intr_mask(h, HPSA_INTR_ON); 7017 7018 hpsa_hba_inquiry(h); 7019 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ 7020 7021 /* Monitor the controller for firmware lockups */ 7022 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; 7023 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); 7024 schedule_delayed_work(&h->monitor_ctlr_work, 7025 h->heartbeat_sample_interval); 7026 return 0; 7027 7028 clean4: 7029 hpsa_free_sg_chain_blocks(h); 7030 hpsa_free_cmd_pool(h); 7031 hpsa_free_irqs(h); 7032 clean2: 7033 clean1: 7034 if (h->lockup_detected) 7035 free_percpu(h->lockup_detected); 7036 kfree(h); 7037 return rc; 7038 } 7039 7040 static void hpsa_flush_cache(struct ctlr_info *h) 7041 { 7042 char *flush_buf; 7043 struct CommandList *c; 7044 7045 /* Don't bother trying to flush the cache if locked up */ 7046 if (unlikely(lockup_detected(h))) 7047 return; 7048 flush_buf = kzalloc(4, GFP_KERNEL); 7049 if (!flush_buf) 7050 return; 7051 7052 c = cmd_special_alloc(h); 7053 if (!c) { 7054 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 7055 goto out_of_memory; 7056 } 7057 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, 7058 RAID_CTLR_LUNID, TYPE_CMD)) { 7059 goto out; 7060 } 7061 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE); 7062 if (c->err_info->CommandStatus != 0) 7063 out: 7064 dev_warn(&h->pdev->dev, 7065 "error flushing cache on controller\n"); 7066 cmd_special_free(h, c); 7067 out_of_memory: 7068 kfree(flush_buf); 7069 } 7070 7071 static void hpsa_shutdown(struct pci_dev *pdev) 7072 { 7073 struct ctlr_info *h; 7074 7075 h = pci_get_drvdata(pdev); 7076 /* Turn board interrupts off and send the flush cache command 7077 * sendcmd will turn off interrupt, and send the flush... 
7078 * To write all data in the battery backed cache to disks 7079 */ 7080 hpsa_flush_cache(h); 7081 h->access.set_intr_mask(h, HPSA_INTR_OFF); 7082 hpsa_free_irqs_and_disable_msix(h); 7083 } 7084 7085 static void hpsa_free_device_info(struct ctlr_info *h) 7086 { 7087 int i; 7088 7089 for (i = 0; i < h->ndevices; i++) 7090 kfree(h->dev[i]); 7091 } 7092 7093 static void hpsa_remove_one(struct pci_dev *pdev) 7094 { 7095 struct ctlr_info *h; 7096 unsigned long flags; 7097 7098 if (pci_get_drvdata(pdev) == NULL) { 7099 dev_err(&pdev->dev, "unable to remove device\n"); 7100 return; 7101 } 7102 h = pci_get_drvdata(pdev); 7103 7104 /* Get rid of any controller monitoring work items */ 7105 spin_lock_irqsave(&h->lock, flags); 7106 h->remove_in_progress = 1; 7107 cancel_delayed_work(&h->monitor_ctlr_work); 7108 spin_unlock_irqrestore(&h->lock, flags); 7109 7110 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ 7111 hpsa_shutdown(pdev); 7112 iounmap(h->vaddr); 7113 iounmap(h->transtable); 7114 iounmap(h->cfgtable); 7115 hpsa_free_device_info(h); 7116 hpsa_free_sg_chain_blocks(h); 7117 pci_free_consistent(h->pdev, 7118 h->nr_cmds * sizeof(struct CommandList), 7119 h->cmd_pool, h->cmd_pool_dhandle); 7120 pci_free_consistent(h->pdev, 7121 h->nr_cmds * sizeof(struct ErrorInfo), 7122 h->errinfo_pool, h->errinfo_pool_dhandle); 7123 hpsa_free_reply_queues(h); 7124 kfree(h->cmd_pool_bits); 7125 kfree(h->blockFetchTable); 7126 kfree(h->ioaccel1_blockFetchTable); 7127 kfree(h->ioaccel2_blockFetchTable); 7128 kfree(h->hba_inquiry_data); 7129 pci_disable_device(pdev); 7130 pci_release_regions(pdev); 7131 free_percpu(h->lockup_detected); 7132 kfree(h); 7133 } 7134 7135 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev, 7136 __attribute__((unused)) pm_message_t state) 7137 { 7138 return -ENOSYS; 7139 } 7140 7141 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev) 7142 { 7143 return -ENOSYS; 7144 } 7145 7146 static struct pci_driver hpsa_pci_driver = { 7147 .name = HPSA, 7148 .probe = hpsa_init_one, 7149 .remove = hpsa_remove_one, 7150 .id_table = hpsa_pci_device_id, /* id_table */ 7151 .shutdown = hpsa_shutdown, 7152 .suspend = hpsa_suspend, 7153 .resume = hpsa_resume, 7154 }; 7155 7156 /* Fill in bucket_map[], given nsgs (the max number of 7157 * scatter gather elements supported) and bucket[], 7158 * which is an array of 8 integers. The bucket[] array 7159 * contains 8 different DMA transfer sizes (in 16 7160 * byte increments) which the controller uses to fetch 7161 * commands. This function fills in bucket_map[], which 7162 * maps a given number of scatter gather elements to one of 7163 * the 8 DMA transfer sizes. The point of it is to allow the 7164 * controller to only do as much DMA as needed to fetch the 7165 * command, with the DMA transfer size encoded in the lower 7166 * bits of the command address. 7167 */ 7168 static void calc_bucket_map(int bucket[], int num_buckets, 7169 int nsgs, int min_blocks, u32 *bucket_map) 7170 { 7171 int i, j, b, size; 7172 7173 /* Note, bucket_map must have nsgs+1 entries. */ 7174 for (i = 0; i <= nsgs; i++) { 7175 /* Compute size of a command with i SG entries */ 7176 size = i + min_blocks; 7177 b = num_buckets; /* Assume the biggest bucket */ 7178 /* Find the bucket that is just big enough */ 7179 for (j = 0; j < num_buckets; j++) { 7180 if (bucket[j] >= size) { 7181 b = j; 7182 break; 7183 } 7184 } 7185 /* for a command with i SG entries, use bucket b. 
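		 * A worked example, using the bft[] table and
		 * min_blocks == 4 passed in from
		 * hpsa_enter_performant_mode() below: for i == 3 SG
		 * entries, size == 7 blocks, and the first bucket[j] >= 7
		 * is bucket[2] == 8, so bucket_map[3] == 2.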
*/ 7186 bucket_map[i] = b; 7187 } 7188 } 7189 7190 static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) 7191 { 7192 int i; 7193 unsigned long register_value; 7194 unsigned long transMethod = CFGTBL_Trans_Performant | 7195 (trans_support & CFGTBL_Trans_use_short_tags) | 7196 CFGTBL_Trans_enable_directed_msix | 7197 (trans_support & (CFGTBL_Trans_io_accel1 | 7198 CFGTBL_Trans_io_accel2)); 7199 struct access_method access = SA5_performant_access; 7200 7201 /* This is a bit complicated. There are 8 registers on 7202 * the controller which we write to to tell it 8 different 7203 * sizes of commands which there may be. It's a way of 7204 * reducing the DMA done to fetch each command. Encoded into 7205 * each command's tag are 3 bits which communicate to the controller 7206 * which of the eight sizes that command fits within. The size of 7207 * each command depends on how many scatter gather entries there are. 7208 * Each SG entry requires 16 bytes. The eight registers are programmed 7209 * with the number of 16-byte blocks a command of that size requires. 7210 * The smallest command possible requires 5 such 16 byte blocks. 7211 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte 7212 * blocks. Note, this only extends to the SG entries contained 7213 * within the command block, and does not extend to chained blocks 7214 * of SG elements. bft[] contains the eight values we write to 7215 * the registers. They are not evenly distributed, but have more 7216 * sizes for small commands, and fewer sizes for larger commands. 7217 */ 7218 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4}; 7219 #define MIN_IOACCEL2_BFT_ENTRY 5 7220 #define HPSA_IOACCEL2_HEADER_SZ 4 7221 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12, 7222 13, 14, 15, 16, 17, 18, 19, 7223 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES}; 7224 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16); 7225 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8); 7226 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) > 7227 16 * MIN_IOACCEL2_BFT_ENTRY); 7228 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16); 7229 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4); 7230 /* 5 = 1 s/g entry or 4k 7231 * 6 = 2 s/g entry or 8k 7232 * 8 = 4 s/g entry or 16k 7233 * 10 = 6 s/g entry or 24k 7234 */ 7235 7236 /* If the controller supports either ioaccel method then 7237 * we can also use the RAID stack submit path that does not 7238 * perform the superfluous readl() after each command submission. 7239 */ 7240 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2)) 7241 access = SA5_performant_access_no_read; 7242 7243 /* Controller spec: zero out this buffer. 
	/* If the controller supports either ioaccel method then
	 * we can also use the RAID stack submit path that does not
	 * perform the superfluous readl() after each command submission.
	 */
	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
		access = SA5_performant_access_no_read;

	/* Controller spec: zero out this buffer. */
	for (i = 0; i < h->nreply_queues; i++)
		memset(h->reply_queue[i].head, 0, h->reply_queue_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_queue[i].busaddr,
			&h->transtable->RepQAddr[i].lower);
	}

	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
	/* Enable outbound interrupt coalescing in accelerator mode. */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		access = SA5_ioaccel_mode1_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	} else {
		if (trans_support & CFGTBL_Trans_io_accel2) {
			access = SA5_ioaccel_mode2_access;
			writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
			writel(4, &h->cfgtable->HostWrite.CoalIntCount);
		}
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_warn(&h->pdev->dev,
			"unable to get board into performant mode\n");
		return;
	}
	/* Change the access methods to the performant access methods */
	h->access = access;
	h->transMethod = transMethod;

	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
		(trans_support & CFGTBL_Trans_io_accel2)))
		return;
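	/*
	 * From here on, one of the two ioaccel transports is configured.
	 * Each needs its own block fetch table sized for its SG limits;
	 * the second doorbell/ack handshake at the end of this function
	 * presumably makes these additional settings take effect.
	 */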
	if (trans_support & CFGTBL_Trans_io_accel1) {
		/* Set up I/O accelerator mode */
		for (i = 0; i < h->nreply_queues; i++) {
			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
			h->reply_queue[i].current_entry =
				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
		}
		bft[7] = h->ioaccel_maxsg + 8;
		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
				h->ioaccel1_blockFetchTable);

		/* initialize all reply queue entries to unused */
		for (i = 0; i < h->nreply_queues; i++)
			memset(h->reply_queue[i].head,
				(u8) IOACCEL_MODE1_REPLY_UNUSED,
				h->reply_queue_size);

		/* set all the constant fields in the accelerator command
		 * frames once at init time to save CPU cycles later.
		 */
		for (i = 0; i < h->nr_cmds; i++) {
			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];

			cp->function = IOACCEL1_FUNCTION_SCSIIO;
			cp->err_info = (u32) (h->errinfo_pool_dhandle +
					(i * sizeof(struct ErrorInfo)));
			cp->err_info_len = sizeof(struct ErrorInfo);
			cp->sgl_offset = IOACCEL1_SGLOFFSET;
			cp->host_context_flags =
				cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
			cp->timeout_sec = 0;
			cp->ReplyQueue = 0;
			cp->tag =
				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT) |
						DIRECT_LOOKUP_BIT);
			cp->host_addr =
				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
					(i * sizeof(struct io_accel1_cmd)));
		}
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		u64 cfg_offset, cfg_base_addr_index;
		u32 bft2_offset, cfg_base_addr;
		int rc;

		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
			&cfg_base_addr_index, &cfg_offset);
		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
				4, h->ioaccel2_blockFetchTable);
		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
		BUILD_BUG_ON(offsetof(struct CfgTable,
				io_accel_request_size_offset) != 0xb8);
		h->ioaccel2_bft2_regs =
			remap_pci_mem(pci_resource_start(h->pdev,
					cfg_base_addr_index) +
					cfg_offset + bft2_offset,
					ARRAY_SIZE(bft2) *
					sizeof(*h->ioaccel2_bft2_regs));
		for (i = 0; i < ARRAY_SIZE(bft2); i++)
			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
}

static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/* Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
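	/*
	 * Since the pool below is carved into h->nr_cmds consecutive
	 * io_accel1_cmd structures, checking sizeof() modulo the alignment
	 * at compile time ensures every element -- not just the first --
	 * stays on the required 128-byte boundary, keeping the low 7 bits
	 * of each command address free for the hardware.
	 */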
7373 */ 7374 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) % 7375 IOACCEL1_COMMANDLIST_ALIGNMENT); 7376 h->ioaccel_cmd_pool = 7377 pci_alloc_consistent(h->pdev, 7378 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), 7379 &(h->ioaccel_cmd_pool_dhandle)); 7380 7381 h->ioaccel1_blockFetchTable = 7382 kmalloc(((h->ioaccel_maxsg + 1) * 7383 sizeof(u32)), GFP_KERNEL); 7384 7385 if ((h->ioaccel_cmd_pool == NULL) || 7386 (h->ioaccel1_blockFetchTable == NULL)) 7387 goto clean_up; 7388 7389 memset(h->ioaccel_cmd_pool, 0, 7390 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool)); 7391 return 0; 7392 7393 clean_up: 7394 if (h->ioaccel_cmd_pool) 7395 pci_free_consistent(h->pdev, 7396 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), 7397 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); 7398 kfree(h->ioaccel1_blockFetchTable); 7399 return 1; 7400 } 7401 7402 static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h) 7403 { 7404 /* Allocate ioaccel2 mode command blocks and block fetch table */ 7405 7406 h->ioaccel_maxsg = 7407 readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); 7408 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES) 7409 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES; 7410 7411 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) % 7412 IOACCEL2_COMMANDLIST_ALIGNMENT); 7413 h->ioaccel2_cmd_pool = 7414 pci_alloc_consistent(h->pdev, 7415 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), 7416 &(h->ioaccel2_cmd_pool_dhandle)); 7417 7418 h->ioaccel2_blockFetchTable = 7419 kmalloc(((h->ioaccel_maxsg + 1) * 7420 sizeof(u32)), GFP_KERNEL); 7421 7422 if ((h->ioaccel2_cmd_pool == NULL) || 7423 (h->ioaccel2_blockFetchTable == NULL)) 7424 goto clean_up; 7425 7426 memset(h->ioaccel2_cmd_pool, 0, 7427 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool)); 7428 return 0; 7429 7430 clean_up: 7431 if (h->ioaccel2_cmd_pool) 7432 pci_free_consistent(h->pdev, 7433 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), 7434 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle); 7435 kfree(h->ioaccel2_blockFetchTable); 7436 return 1; 7437 } 7438 7439 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) 7440 { 7441 u32 trans_support; 7442 unsigned long transMethod = CFGTBL_Trans_Performant | 7443 CFGTBL_Trans_use_short_tags; 7444 int i; 7445 7446 if (hpsa_simple_mode) 7447 return; 7448 7449 trans_support = readl(&(h->cfgtable->TransportSupport)); 7450 if (!(trans_support & PERFORMANT_MODE)) 7451 return; 7452 7453 /* Check for I/O accelerator mode support */ 7454 if (trans_support & CFGTBL_Trans_io_accel1) { 7455 transMethod |= CFGTBL_Trans_io_accel1 | 7456 CFGTBL_Trans_enable_directed_msix; 7457 if (hpsa_alloc_ioaccel_cmd_and_bft(h)) 7458 goto clean_up; 7459 } else { 7460 if (trans_support & CFGTBL_Trans_io_accel2) { 7461 transMethod |= CFGTBL_Trans_io_accel2 | 7462 CFGTBL_Trans_enable_directed_msix; 7463 if (ioaccel2_alloc_cmds_and_bft(h)) 7464 goto clean_up; 7465 } 7466 } 7467 7468 h->nreply_queues = h->msix_vector > 0 ? 
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
					CFGTBL_Trans_use_short_tags;
	int i;

	if (hpsa_simple_mode)
		return;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_enable_directed_msix;
		if (hpsa_alloc_ioaccel_cmd_and_bft(h))
			goto clean_up;
	} else {
		if (trans_support & CFGTBL_Trans_io_accel2) {
			transMethod |= CFGTBL_Trans_io_accel2 |
					CFGTBL_Trans_enable_directed_msix;
			if (ioaccel2_alloc_cmds_and_bft(h))
				goto clean_up;
		}
	}

	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_queue_size = h->max_commands * sizeof(u64);

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
						h->reply_queue_size,
						&(h->reply_queue[i].busaddr));
		if (!h->reply_queue[i].head)
			goto clean_up;
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);
	if (!h->blockFetchTable)
		goto clean_up;

	hpsa_enter_performant_mode(h, trans_support);
	return;

clean_up:
	hpsa_free_reply_queues(h);
	kfree(h->blockFetchTable);
}

static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}
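/*
 * Wait for all outstanding accelerated (ioaccel1/ioaccel2) commands to
 * drain: count them on both the request and completion queues under the
 * lock, and re-check every 100 ms until none remain.
 */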
7526 */ 7527 static int __init hpsa_init(void) 7528 { 7529 return pci_register_driver(&hpsa_pci_driver); 7530 } 7531 7532 static void __exit hpsa_cleanup(void) 7533 { 7534 pci_unregister_driver(&hpsa_pci_driver); 7535 } 7536 7537 static void __attribute__((unused)) verify_offsets(void) 7538 { 7539 #define VERIFY_OFFSET(member, offset) \ 7540 BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset) 7541 7542 VERIFY_OFFSET(structure_size, 0); 7543 VERIFY_OFFSET(volume_blk_size, 4); 7544 VERIFY_OFFSET(volume_blk_cnt, 8); 7545 VERIFY_OFFSET(phys_blk_shift, 16); 7546 VERIFY_OFFSET(parity_rotation_shift, 17); 7547 VERIFY_OFFSET(strip_size, 18); 7548 VERIFY_OFFSET(disk_starting_blk, 20); 7549 VERIFY_OFFSET(disk_blk_cnt, 28); 7550 VERIFY_OFFSET(data_disks_per_row, 36); 7551 VERIFY_OFFSET(metadata_disks_per_row, 38); 7552 VERIFY_OFFSET(row_cnt, 40); 7553 VERIFY_OFFSET(layout_map_count, 42); 7554 VERIFY_OFFSET(flags, 44); 7555 VERIFY_OFFSET(dekindex, 46); 7556 /* VERIFY_OFFSET(reserved, 48 */ 7557 VERIFY_OFFSET(data, 64); 7558 7559 #undef VERIFY_OFFSET 7560 7561 #define VERIFY_OFFSET(member, offset) \ 7562 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset) 7563 7564 VERIFY_OFFSET(IU_type, 0); 7565 VERIFY_OFFSET(direction, 1); 7566 VERIFY_OFFSET(reply_queue, 2); 7567 /* VERIFY_OFFSET(reserved1, 3); */ 7568 VERIFY_OFFSET(scsi_nexus, 4); 7569 VERIFY_OFFSET(Tag, 8); 7570 VERIFY_OFFSET(cdb, 16); 7571 VERIFY_OFFSET(cciss_lun, 32); 7572 VERIFY_OFFSET(data_len, 40); 7573 VERIFY_OFFSET(cmd_priority_task_attr, 44); 7574 VERIFY_OFFSET(sg_count, 45); 7575 /* VERIFY_OFFSET(reserved3 */ 7576 VERIFY_OFFSET(err_ptr, 48); 7577 VERIFY_OFFSET(err_len, 56); 7578 /* VERIFY_OFFSET(reserved4 */ 7579 VERIFY_OFFSET(sg, 64); 7580 7581 #undef VERIFY_OFFSET 7582 7583 #define VERIFY_OFFSET(member, offset) \ 7584 BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset) 7585 7586 VERIFY_OFFSET(dev_handle, 0x00); 7587 VERIFY_OFFSET(reserved1, 0x02); 7588 VERIFY_OFFSET(function, 0x03); 7589 VERIFY_OFFSET(reserved2, 0x04); 7590 VERIFY_OFFSET(err_info, 0x0C); 7591 VERIFY_OFFSET(reserved3, 0x10); 7592 VERIFY_OFFSET(err_info_len, 0x12); 7593 VERIFY_OFFSET(reserved4, 0x13); 7594 VERIFY_OFFSET(sgl_offset, 0x14); 7595 VERIFY_OFFSET(reserved5, 0x15); 7596 VERIFY_OFFSET(transfer_len, 0x1C); 7597 VERIFY_OFFSET(reserved6, 0x20); 7598 VERIFY_OFFSET(io_flags, 0x24); 7599 VERIFY_OFFSET(reserved7, 0x26); 7600 VERIFY_OFFSET(LUN, 0x34); 7601 VERIFY_OFFSET(control, 0x3C); 7602 VERIFY_OFFSET(CDB, 0x40); 7603 VERIFY_OFFSET(reserved8, 0x50); 7604 VERIFY_OFFSET(host_context_flags, 0x60); 7605 VERIFY_OFFSET(timeout_sec, 0x62); 7606 VERIFY_OFFSET(ReplyQueue, 0x64); 7607 VERIFY_OFFSET(reserved9, 0x65); 7608 VERIFY_OFFSET(tag, 0x68); 7609 VERIFY_OFFSET(host_addr, 0x70); 7610 VERIFY_OFFSET(CISS_LUN, 0x78); 7611 VERIFY_OFFSET(SG, 0x78 + 8); 7612 #undef VERIFY_OFFSET 7613 } 7614 7615 module_init(hpsa_init); 7616 module_exit(hpsa_cleanup); 7617