1 /* 2 * Disk Array driver for HP Smart Array SAS controllers 3 * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P. 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation; version 2 of the License. 8 * 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 12 * NON INFRINGEMENT. See the GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, write to the Free Software 16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 17 * 18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com 19 * 20 */ 21 22 #include <linux/module.h> 23 #include <linux/interrupt.h> 24 #include <linux/types.h> 25 #include <linux/pci.h> 26 #include <linux/pci-aspm.h> 27 #include <linux/kernel.h> 28 #include <linux/slab.h> 29 #include <linux/delay.h> 30 #include <linux/fs.h> 31 #include <linux/timer.h> 32 #include <linux/init.h> 33 #include <linux/spinlock.h> 34 #include <linux/compat.h> 35 #include <linux/blktrace_api.h> 36 #include <linux/uaccess.h> 37 #include <linux/io.h> 38 #include <linux/dma-mapping.h> 39 #include <linux/completion.h> 40 #include <linux/moduleparam.h> 41 #include <scsi/scsi.h> 42 #include <scsi/scsi_cmnd.h> 43 #include <scsi/scsi_device.h> 44 #include <scsi/scsi_host.h> 45 #include <scsi/scsi_tcq.h> 46 #include <linux/cciss_ioctl.h> 47 #include <linux/string.h> 48 #include <linux/bitmap.h> 49 #include <linux/atomic.h> 50 #include <linux/jiffies.h> 51 #include <asm/div64.h> 52 #include "hpsa_cmd.h" 53 #include "hpsa.h" 54 55 /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */ 56 #define HPSA_DRIVER_VERSION "3.4.4-1" 57 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" 58 #define HPSA "hpsa" 59 60 /* How long to wait (in milliseconds) for board to go into simple mode */ 61 #define MAX_CONFIG_WAIT 30000 62 #define MAX_IOCTL_CONFIG_WAIT 1000 63 64 /*define how many times we will try a command because of bus resets */ 65 #define MAX_CMD_RETRIES 3 66 67 /* Embedded module documentation macros - see modules.h */ 68 MODULE_AUTHOR("Hewlett-Packard Company"); 69 MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \ 70 HPSA_DRIVER_VERSION); 71 MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers"); 72 MODULE_VERSION(HPSA_DRIVER_VERSION); 73 MODULE_LICENSE("GPL"); 74 75 static int hpsa_allow_any; 76 module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR); 77 MODULE_PARM_DESC(hpsa_allow_any, 78 "Allow hpsa driver to access unknown HP Smart Array hardware"); 79 static int hpsa_simple_mode; 80 module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR); 81 MODULE_PARM_DESC(hpsa_simple_mode, 82 "Use 'simple mode' rather than 'performant mode'"); 83 84 /* define the PCI info for the cards we can control */ 85 static const struct pci_device_id hpsa_pci_device_id[] = { 86 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241}, 87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243}, 88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, 89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247}, 90 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, 91 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A}, 92 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B}, 93 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233}, 94 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350}, 95 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351}, 96 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352}, 97 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353}, 98 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354}, 99 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355}, 100 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356}, 101 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921}, 102 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922}, 103 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923}, 104 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924}, 105 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925}, 106 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926}, 107 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928}, 108 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929}, 109 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD}, 110 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE}, 111 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF}, 112 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0}, 113 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1}, 114 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2}, 115 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3}, 116 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4}, 117 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5}, 118 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7}, 119 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8}, 120 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9}, 121 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076}, 122 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087}, 123 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D}, 124 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088}, 125 {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f}, 126 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 127 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, 128 {0,} 129 }; 130 131 MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id); 132 133 /* board_id = Subsystem Device ID & Vendor ID 134 * product = Marketing Name for the board 135 * access = Address of the struct of function pointers 136 */ 137 static struct board_type products[] = { 138 {0x3241103C, "Smart Array P212", &SA5_access}, 139 {0x3243103C, "Smart Array P410", &SA5_access}, 140 {0x3245103C, "Smart Array P410i", &SA5_access}, 141 {0x3247103C, "Smart Array P411", &SA5_access}, 142 {0x3249103C, "Smart Array P812", &SA5_access}, 143 {0x324A103C, "Smart Array P712m", &SA5_access}, 144 {0x324B103C, "Smart Array P711m", &SA5_access}, 145 {0x3350103C, "Smart Array P222", &SA5_access}, 146 {0x3351103C, "Smart Array P420", &SA5_access}, 147 {0x3352103C, "Smart Array P421", &SA5_access}, 148 {0x3353103C, "Smart Array P822", &SA5_access}, 149 {0x3354103C, "Smart Array P420i", &SA5_access}, 150 {0x3355103C, "Smart Array P220i", &SA5_access}, 151 {0x3356103C, "Smart Array P721m", &SA5_access}, 152 {0x1921103C, "Smart Array P830i", &SA5_access}, 153 {0x1922103C, "Smart Array P430", &SA5_access}, 154 {0x1923103C, "Smart Array P431", &SA5_access}, 155 {0x1924103C, "Smart Array P830", &SA5_access}, 156 {0x1926103C, "Smart Array P731m", &SA5_access}, 157 {0x1928103C, "Smart Array P230i", &SA5_access}, 158 {0x1929103C, "Smart Array P530", &SA5_access}, 159 {0x21BD103C, "Smart Array", &SA5_access}, 160 {0x21BE103C, "Smart Array", &SA5_access}, 161 {0x21BF103C, "Smart Array", &SA5_access}, 162 {0x21C0103C, "Smart Array", &SA5_access}, 163 {0x21C1103C, "Smart Array", &SA5_access}, 164 {0x21C2103C, "Smart Array", &SA5_access}, 165 {0x21C3103C, "Smart Array", &SA5_access}, 166 {0x21C4103C, "Smart Array", &SA5_access}, 167 {0x21C5103C, "Smart Array", &SA5_access}, 168 {0x21C7103C, "Smart Array", &SA5_access}, 169 {0x21C8103C, "Smart Array", &SA5_access}, 170 {0x21C9103C, "Smart Array", &SA5_access}, 171 {0x00761590, "HP Storage P1224 Array Controller", &SA5_access}, 172 {0x00871590, "HP Storage P1224e Array Controller", &SA5_access}, 173 {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access}, 174 {0x00881590, "HP Storage P1228e Array Controller", &SA5_access}, 175 {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access}, 176 {0xFFFF103C, "Unknown Smart Array", &SA5_access}, 177 }; 178 179 static int number_of_controllers; 180 181 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); 182 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); 183 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg); 184 static void start_io(struct ctlr_info *h); 185 186 #ifdef CONFIG_COMPAT 187 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg); 188 #endif 189 190 static void cmd_free(struct ctlr_info *h, struct CommandList *c); 191 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c); 192 static struct CommandList *cmd_alloc(struct ctlr_info *h); 193 static struct CommandList *cmd_special_alloc(struct ctlr_info *h); 194 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, 195 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, 196 int cmd_type); 197 #define VPD_PAGE (1 << 8) 198 199 static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); 200 static void hpsa_scan_start(struct Scsi_Host *); 201 static int hpsa_scan_finished(struct Scsi_Host *sh, 202 unsigned long elapsed_time); 203 static int hpsa_change_queue_depth(struct scsi_device *sdev, 204 int qdepth, int reason); 205 206 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); 207 static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd); 208 static int hpsa_slave_alloc(struct scsi_device *sdev); 209 static void hpsa_slave_destroy(struct scsi_device *sdev); 210 211 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno); 212 static int check_for_unit_attention(struct ctlr_info *h, 213 struct CommandList *c); 214 static void check_ioctl_unit_attention(struct ctlr_info *h, 215 struct CommandList *c); 216 /* performant mode helper functions */ 217 static void calc_bucket_map(int *bucket, int num_buckets, 218 int nsgs, int min_blocks, int *bucket_map); 219 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h); 220 static inline u32 next_command(struct ctlr_info *h, u8 q); 221 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, 222 u32 *cfg_base_addr, u64 *cfg_base_addr_index, 223 u64 *cfg_offset); 224 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev, 225 unsigned long *memory_bar); 226 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id); 227 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, 228 int wait_for_ready); 229 static inline void finish_cmd(struct CommandList *c); 230 static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h); 231 #define BOARD_NOT_READY 0 232 #define BOARD_READY 1 233 static void hpsa_drain_accel_commands(struct ctlr_info *h); 234 static void hpsa_flush_cache(struct ctlr_info *h); 235 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, 236 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 237 u8 *scsi3addr); 238 239 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) 240 { 241 unsigned long *priv = shost_priv(sdev->host); 242 return (struct ctlr_info *) *priv; 243 } 244 245 static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh) 246 { 247 unsigned long *priv = shost_priv(sh); 248 return (struct ctlr_info *) *priv; 249 } 250 251 static int check_for_unit_attention(struct ctlr_info *h, 252 struct CommandList *c) 253 { 254 if (c->err_info->SenseInfo[2] != UNIT_ATTENTION) 255 return 0; 256 257 switch (c->err_info->SenseInfo[12]) { 258 case STATE_CHANGED: 259 dev_warn(&h->pdev->dev, HPSA "%d: a state change " 260 "detected, command retried\n", h->ctlr); 261 break; 262 case LUN_FAILED: 263 dev_warn(&h->pdev->dev, HPSA "%d: LUN failure " 264 "detected, action required\n", h->ctlr); 265 break; 266 case REPORT_LUNS_CHANGED: 267 dev_warn(&h->pdev->dev, HPSA "%d: report LUN data " 268 "changed, action required\n", h->ctlr); 269 /* 270 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external 271 * target (array) devices. 272 */ 273 break; 274 case POWER_OR_RESET: 275 dev_warn(&h->pdev->dev, HPSA "%d: a power on " 276 "or device reset detected\n", h->ctlr); 277 break; 278 case UNIT_ATTENTION_CLEARED: 279 dev_warn(&h->pdev->dev, HPSA "%d: unit attention " 280 "cleared by another initiator\n", h->ctlr); 281 break; 282 default: 283 dev_warn(&h->pdev->dev, HPSA "%d: unknown " 284 "unit attention detected\n", h->ctlr); 285 break; 286 } 287 return 1; 288 } 289 290 static int check_for_busy(struct ctlr_info *h, struct CommandList *c) 291 { 292 if (c->err_info->CommandStatus != CMD_TARGET_STATUS || 293 (c->err_info->ScsiStatus != SAM_STAT_BUSY && 294 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL)) 295 return 0; 296 dev_warn(&h->pdev->dev, HPSA "device busy"); 297 return 1; 298 } 299 300 static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev, 301 struct device_attribute *attr, 302 const char *buf, size_t count) 303 { 304 int status, len; 305 struct ctlr_info *h; 306 struct Scsi_Host *shost = class_to_shost(dev); 307 char tmpbuf[10]; 308 309 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 310 return -EACCES; 311 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count; 312 strncpy(tmpbuf, buf, len); 313 tmpbuf[len] = '\0'; 314 if (sscanf(tmpbuf, "%d", &status) != 1) 315 return -EINVAL; 316 h = shost_to_hba(shost); 317 h->acciopath_status = !!status; 318 dev_warn(&h->pdev->dev, 319 "hpsa: HP SSD Smart Path %s via sysfs update.\n", 320 h->acciopath_status ? "enabled" : "disabled"); 321 return count; 322 } 323 324 static ssize_t host_store_raid_offload_debug(struct device *dev, 325 struct device_attribute *attr, 326 const char *buf, size_t count) 327 { 328 int debug_level, len; 329 struct ctlr_info *h; 330 struct Scsi_Host *shost = class_to_shost(dev); 331 char tmpbuf[10]; 332 333 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 334 return -EACCES; 335 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count; 336 strncpy(tmpbuf, buf, len); 337 tmpbuf[len] = '\0'; 338 if (sscanf(tmpbuf, "%d", &debug_level) != 1) 339 return -EINVAL; 340 if (debug_level < 0) 341 debug_level = 0; 342 h = shost_to_hba(shost); 343 h->raid_offload_debug = debug_level; 344 dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n", 345 h->raid_offload_debug); 346 return count; 347 } 348 349 static ssize_t host_store_rescan(struct device *dev, 350 struct device_attribute *attr, 351 const char *buf, size_t count) 352 { 353 struct ctlr_info *h; 354 struct Scsi_Host *shost = class_to_shost(dev); 355 h = shost_to_hba(shost); 356 hpsa_scan_start(h->scsi_host); 357 return count; 358 } 359 360 static ssize_t host_show_firmware_revision(struct device *dev, 361 struct device_attribute *attr, char *buf) 362 { 363 struct ctlr_info *h; 364 struct Scsi_Host *shost = class_to_shost(dev); 365 unsigned char *fwrev; 366 367 h = shost_to_hba(shost); 368 if (!h->hba_inquiry_data) 369 return 0; 370 fwrev = &h->hba_inquiry_data[32]; 371 return snprintf(buf, 20, "%c%c%c%c\n", 372 fwrev[0], fwrev[1], fwrev[2], fwrev[3]); 373 } 374 375 static ssize_t host_show_commands_outstanding(struct device *dev, 376 struct device_attribute *attr, char *buf) 377 { 378 struct Scsi_Host *shost = class_to_shost(dev); 379 struct ctlr_info *h = shost_to_hba(shost); 380 381 return snprintf(buf, 20, "%d\n", h->commands_outstanding); 382 } 383 384 static ssize_t host_show_transport_mode(struct device *dev, 385 struct device_attribute *attr, char *buf) 386 { 387 struct ctlr_info *h; 388 struct Scsi_Host *shost = class_to_shost(dev); 389 390 h = shost_to_hba(shost); 391 return snprintf(buf, 20, "%s\n", 392 h->transMethod & CFGTBL_Trans_Performant ? 393 "performant" : "simple"); 394 } 395 396 static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev, 397 struct device_attribute *attr, char *buf) 398 { 399 struct ctlr_info *h; 400 struct Scsi_Host *shost = class_to_shost(dev); 401 402 h = shost_to_hba(shost); 403 return snprintf(buf, 30, "HP SSD Smart Path %s\n", 404 (h->acciopath_status == 1) ? "enabled" : "disabled"); 405 } 406 407 /* List of controllers which cannot be hard reset on kexec with reset_devices */ 408 static u32 unresettable_controller[] = { 409 0x324a103C, /* Smart Array P712m */ 410 0x324b103C, /* SmartArray P711m */ 411 0x3223103C, /* Smart Array P800 */ 412 0x3234103C, /* Smart Array P400 */ 413 0x3235103C, /* Smart Array P400i */ 414 0x3211103C, /* Smart Array E200i */ 415 0x3212103C, /* Smart Array E200 */ 416 0x3213103C, /* Smart Array E200i */ 417 0x3214103C, /* Smart Array E200i */ 418 0x3215103C, /* Smart Array E200i */ 419 0x3237103C, /* Smart Array E500 */ 420 0x323D103C, /* Smart Array P700m */ 421 0x40800E11, /* Smart Array 5i */ 422 0x409C0E11, /* Smart Array 6400 */ 423 0x409D0E11, /* Smart Array 6400 EM */ 424 0x40700E11, /* Smart Array 5300 */ 425 0x40820E11, /* Smart Array 532 */ 426 0x40830E11, /* Smart Array 5312 */ 427 0x409A0E11, /* Smart Array 641 */ 428 0x409B0E11, /* Smart Array 642 */ 429 0x40910E11, /* Smart Array 6i */ 430 }; 431 432 /* List of controllers which cannot even be soft reset */ 433 static u32 soft_unresettable_controller[] = { 434 0x40800E11, /* Smart Array 5i */ 435 0x40700E11, /* Smart Array 5300 */ 436 0x40820E11, /* Smart Array 532 */ 437 0x40830E11, /* Smart Array 5312 */ 438 0x409A0E11, /* Smart Array 641 */ 439 0x409B0E11, /* Smart Array 642 */ 440 0x40910E11, /* Smart Array 6i */ 441 /* Exclude 640x boards. These are two pci devices in one slot 442 * which share a battery backed cache module. One controls the 443 * cache, the other accesses the cache through the one that controls 444 * it. If we reset the one controlling the cache, the other will 445 * likely not be happy. Just forbid resetting this conjoined mess. 446 * The 640x isn't really supported by hpsa anyway. 447 */ 448 0x409C0E11, /* Smart Array 6400 */ 449 0x409D0E11, /* Smart Array 6400 EM */ 450 }; 451 452 static int ctlr_is_hard_resettable(u32 board_id) 453 { 454 int i; 455 456 for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++) 457 if (unresettable_controller[i] == board_id) 458 return 0; 459 return 1; 460 } 461 462 static int ctlr_is_soft_resettable(u32 board_id) 463 { 464 int i; 465 466 for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++) 467 if (soft_unresettable_controller[i] == board_id) 468 return 0; 469 return 1; 470 } 471 472 static int ctlr_is_resettable(u32 board_id) 473 { 474 return ctlr_is_hard_resettable(board_id) || 475 ctlr_is_soft_resettable(board_id); 476 } 477 478 static ssize_t host_show_resettable(struct device *dev, 479 struct device_attribute *attr, char *buf) 480 { 481 struct ctlr_info *h; 482 struct Scsi_Host *shost = class_to_shost(dev); 483 484 h = shost_to_hba(shost); 485 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id)); 486 } 487 488 static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[]) 489 { 490 return (scsi3addr[3] & 0xC0) == 0x40; 491 } 492 493 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", 494 "1(ADM)", "UNKNOWN" 495 }; 496 #define HPSA_RAID_0 0 497 #define HPSA_RAID_4 1 498 #define HPSA_RAID_1 2 /* also used for RAID 10 */ 499 #define HPSA_RAID_5 3 /* also used for RAID 50 */ 500 #define HPSA_RAID_51 4 501 #define HPSA_RAID_6 5 /* also used for RAID 60 */ 502 #define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */ 503 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1) 504 505 static ssize_t raid_level_show(struct device *dev, 506 struct device_attribute *attr, char *buf) 507 { 508 ssize_t l = 0; 509 unsigned char rlevel; 510 struct ctlr_info *h; 511 struct scsi_device *sdev; 512 struct hpsa_scsi_dev_t *hdev; 513 unsigned long flags; 514 515 sdev = to_scsi_device(dev); 516 h = sdev_to_hba(sdev); 517 spin_lock_irqsave(&h->lock, flags); 518 hdev = sdev->hostdata; 519 if (!hdev) { 520 spin_unlock_irqrestore(&h->lock, flags); 521 return -ENODEV; 522 } 523 524 /* Is this even a logical drive? */ 525 if (!is_logical_dev_addr_mode(hdev->scsi3addr)) { 526 spin_unlock_irqrestore(&h->lock, flags); 527 l = snprintf(buf, PAGE_SIZE, "N/A\n"); 528 return l; 529 } 530 531 rlevel = hdev->raid_level; 532 spin_unlock_irqrestore(&h->lock, flags); 533 if (rlevel > RAID_UNKNOWN) 534 rlevel = RAID_UNKNOWN; 535 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]); 536 return l; 537 } 538 539 static ssize_t lunid_show(struct device *dev, 540 struct device_attribute *attr, char *buf) 541 { 542 struct ctlr_info *h; 543 struct scsi_device *sdev; 544 struct hpsa_scsi_dev_t *hdev; 545 unsigned long flags; 546 unsigned char lunid[8]; 547 548 sdev = to_scsi_device(dev); 549 h = sdev_to_hba(sdev); 550 spin_lock_irqsave(&h->lock, flags); 551 hdev = sdev->hostdata; 552 if (!hdev) { 553 spin_unlock_irqrestore(&h->lock, flags); 554 return -ENODEV; 555 } 556 memcpy(lunid, hdev->scsi3addr, sizeof(lunid)); 557 spin_unlock_irqrestore(&h->lock, flags); 558 return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 559 lunid[0], lunid[1], lunid[2], lunid[3], 560 lunid[4], lunid[5], lunid[6], lunid[7]); 561 } 562 563 static ssize_t unique_id_show(struct device *dev, 564 struct device_attribute *attr, char *buf) 565 { 566 struct ctlr_info *h; 567 struct scsi_device *sdev; 568 struct hpsa_scsi_dev_t *hdev; 569 unsigned long flags; 570 unsigned char sn[16]; 571 572 sdev = to_scsi_device(dev); 573 h = sdev_to_hba(sdev); 574 spin_lock_irqsave(&h->lock, flags); 575 hdev = sdev->hostdata; 576 if (!hdev) { 577 spin_unlock_irqrestore(&h->lock, flags); 578 return -ENODEV; 579 } 580 memcpy(sn, hdev->device_id, sizeof(sn)); 581 spin_unlock_irqrestore(&h->lock, flags); 582 return snprintf(buf, 16 * 2 + 2, 583 "%02X%02X%02X%02X%02X%02X%02X%02X" 584 "%02X%02X%02X%02X%02X%02X%02X%02X\n", 585 sn[0], sn[1], sn[2], sn[3], 586 sn[4], sn[5], sn[6], sn[7], 587 sn[8], sn[9], sn[10], sn[11], 588 sn[12], sn[13], sn[14], sn[15]); 589 } 590 591 static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev, 592 struct device_attribute *attr, char *buf) 593 { 594 struct ctlr_info *h; 595 struct scsi_device *sdev; 596 struct hpsa_scsi_dev_t *hdev; 597 unsigned long flags; 598 int offload_enabled; 599 600 sdev = to_scsi_device(dev); 601 h = sdev_to_hba(sdev); 602 spin_lock_irqsave(&h->lock, flags); 603 hdev = sdev->hostdata; 604 if (!hdev) { 605 spin_unlock_irqrestore(&h->lock, flags); 606 return -ENODEV; 607 } 608 offload_enabled = hdev->offload_enabled; 609 spin_unlock_irqrestore(&h->lock, flags); 610 return snprintf(buf, 20, "%d\n", offload_enabled); 611 } 612 613 static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); 614 static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); 615 static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); 616 static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); 617 static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO, 618 host_show_hp_ssd_smart_path_enabled, NULL); 619 static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH, 620 host_show_hp_ssd_smart_path_status, 621 host_store_hp_ssd_smart_path_status); 622 static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL, 623 host_store_raid_offload_debug); 624 static DEVICE_ATTR(firmware_revision, S_IRUGO, 625 host_show_firmware_revision, NULL); 626 static DEVICE_ATTR(commands_outstanding, S_IRUGO, 627 host_show_commands_outstanding, NULL); 628 static DEVICE_ATTR(transport_mode, S_IRUGO, 629 host_show_transport_mode, NULL); 630 static DEVICE_ATTR(resettable, S_IRUGO, 631 host_show_resettable, NULL); 632 633 static struct device_attribute *hpsa_sdev_attrs[] = { 634 &dev_attr_raid_level, 635 &dev_attr_lunid, 636 &dev_attr_unique_id, 637 &dev_attr_hp_ssd_smart_path_enabled, 638 NULL, 639 }; 640 641 static struct device_attribute *hpsa_shost_attrs[] = { 642 &dev_attr_rescan, 643 &dev_attr_firmware_revision, 644 &dev_attr_commands_outstanding, 645 &dev_attr_transport_mode, 646 &dev_attr_resettable, 647 &dev_attr_hp_ssd_smart_path_status, 648 &dev_attr_raid_offload_debug, 649 NULL, 650 }; 651 652 static struct scsi_host_template hpsa_driver_template = { 653 .module = THIS_MODULE, 654 .name = HPSA, 655 .proc_name = HPSA, 656 .queuecommand = hpsa_scsi_queue_command, 657 .scan_start = hpsa_scan_start, 658 .scan_finished = hpsa_scan_finished, 659 .change_queue_depth = hpsa_change_queue_depth, 660 .this_id = -1, 661 .use_clustering = ENABLE_CLUSTERING, 662 .eh_abort_handler = hpsa_eh_abort_handler, 663 .eh_device_reset_handler = hpsa_eh_device_reset_handler, 664 .ioctl = hpsa_ioctl, 665 .slave_alloc = hpsa_slave_alloc, 666 .slave_destroy = hpsa_slave_destroy, 667 #ifdef CONFIG_COMPAT 668 .compat_ioctl = hpsa_compat_ioctl, 669 #endif 670 .sdev_attrs = hpsa_sdev_attrs, 671 .shost_attrs = hpsa_shost_attrs, 672 .max_sectors = 8192, 673 .no_write_same = 1, 674 }; 675 676 677 /* Enqueuing and dequeuing functions for cmdlists. */ 678 static inline void addQ(struct list_head *list, struct CommandList *c) 679 { 680 list_add_tail(&c->list, list); 681 } 682 683 static inline u32 next_command(struct ctlr_info *h, u8 q) 684 { 685 u32 a; 686 struct reply_pool *rq = &h->reply_queue[q]; 687 unsigned long flags; 688 689 if (h->transMethod & CFGTBL_Trans_io_accel1) 690 return h->access.command_completed(h, q); 691 692 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) 693 return h->access.command_completed(h, q); 694 695 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) { 696 a = rq->head[rq->current_entry]; 697 rq->current_entry++; 698 spin_lock_irqsave(&h->lock, flags); 699 h->commands_outstanding--; 700 spin_unlock_irqrestore(&h->lock, flags); 701 } else { 702 a = FIFO_EMPTY; 703 } 704 /* Check for wraparound */ 705 if (rq->current_entry == h->max_commands) { 706 rq->current_entry = 0; 707 rq->wraparound ^= 1; 708 } 709 return a; 710 } 711 712 /* 713 * There are some special bits in the bus address of the 714 * command that we have to set for the controller to know 715 * how to process the command: 716 * 717 * Normal performant mode: 718 * bit 0: 1 means performant mode, 0 means simple mode. 719 * bits 1-3 = block fetch table entry 720 * bits 4-6 = command type (== 0) 721 * 722 * ioaccel1 mode: 723 * bit 0 = "performant mode" bit. 724 * bits 1-3 = block fetch table entry 725 * bits 4-6 = command type (== 110) 726 * (command type is needed because ioaccel1 mode 727 * commands are submitted through the same register as normal 728 * mode commands, so this is how the controller knows whether 729 * the command is normal mode or ioaccel1 mode.) 730 * 731 * ioaccel2 mode: 732 * bit 0 = "performant mode" bit. 733 * bits 1-4 = block fetch table entry (note extra bit) 734 * bits 4-6 = not needed, because ioaccel2 mode has 735 * a separate special register for submitting commands. 736 */ 737 738 /* set_performant_mode: Modify the tag for cciss performant 739 * set bit 0 for pull model, bits 3-1 for block fetch 740 * register number 741 */ 742 static void set_performant_mode(struct ctlr_info *h, struct CommandList *c) 743 { 744 if (likely(h->transMethod & CFGTBL_Trans_Performant)) { 745 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); 746 if (likely(h->msix_vector > 0)) 747 c->Header.ReplyQueue = 748 raw_smp_processor_id() % h->nreply_queues; 749 } 750 } 751 752 static void set_ioaccel1_performant_mode(struct ctlr_info *h, 753 struct CommandList *c) 754 { 755 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; 756 757 /* Tell the controller to post the reply to the queue for this 758 * processor. This seems to give the best I/O throughput. 759 */ 760 cp->ReplyQueue = smp_processor_id() % h->nreply_queues; 761 /* Set the bits in the address sent down to include: 762 * - performant mode bit (bit 0) 763 * - pull count (bits 1-3) 764 * - command type (bits 4-6) 765 */ 766 c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) | 767 IOACCEL1_BUSADDR_CMDTYPE; 768 } 769 770 static void set_ioaccel2_performant_mode(struct ctlr_info *h, 771 struct CommandList *c) 772 { 773 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; 774 775 /* Tell the controller to post the reply to the queue for this 776 * processor. This seems to give the best I/O throughput. 777 */ 778 cp->reply_queue = smp_processor_id() % h->nreply_queues; 779 /* Set the bits in the address sent down to include: 780 * - performant mode bit not used in ioaccel mode 2 781 * - pull count (bits 0-3) 782 * - command type isn't needed for ioaccel2 783 */ 784 c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]); 785 } 786 787 static int is_firmware_flash_cmd(u8 *cdb) 788 { 789 return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE; 790 } 791 792 /* 793 * During firmware flash, the heartbeat register may not update as frequently 794 * as it should. So we dial down lockup detection during firmware flash. and 795 * dial it back up when firmware flash completes. 796 */ 797 #define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ) 798 #define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ) 799 static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h, 800 struct CommandList *c) 801 { 802 if (!is_firmware_flash_cmd(c->Request.CDB)) 803 return; 804 atomic_inc(&h->firmware_flash_in_progress); 805 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH; 806 } 807 808 static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h, 809 struct CommandList *c) 810 { 811 if (is_firmware_flash_cmd(c->Request.CDB) && 812 atomic_dec_and_test(&h->firmware_flash_in_progress)) 813 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; 814 } 815 816 static void enqueue_cmd_and_start_io(struct ctlr_info *h, 817 struct CommandList *c) 818 { 819 unsigned long flags; 820 821 switch (c->cmd_type) { 822 case CMD_IOACCEL1: 823 set_ioaccel1_performant_mode(h, c); 824 break; 825 case CMD_IOACCEL2: 826 set_ioaccel2_performant_mode(h, c); 827 break; 828 default: 829 set_performant_mode(h, c); 830 } 831 dial_down_lockup_detection_during_fw_flash(h, c); 832 spin_lock_irqsave(&h->lock, flags); 833 addQ(&h->reqQ, c); 834 h->Qdepth++; 835 spin_unlock_irqrestore(&h->lock, flags); 836 start_io(h); 837 } 838 839 static inline void removeQ(struct CommandList *c) 840 { 841 if (WARN_ON(list_empty(&c->list))) 842 return; 843 list_del_init(&c->list); 844 } 845 846 static inline int is_hba_lunid(unsigned char scsi3addr[]) 847 { 848 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0; 849 } 850 851 static inline int is_scsi_rev_5(struct ctlr_info *h) 852 { 853 if (!h->hba_inquiry_data) 854 return 0; 855 if ((h->hba_inquiry_data[2] & 0x07) == 5) 856 return 1; 857 return 0; 858 } 859 860 static int hpsa_find_target_lun(struct ctlr_info *h, 861 unsigned char scsi3addr[], int bus, int *target, int *lun) 862 { 863 /* finds an unused bus, target, lun for a new physical device 864 * assumes h->devlock is held 865 */ 866 int i, found = 0; 867 DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES); 868 869 bitmap_zero(lun_taken, HPSA_MAX_DEVICES); 870 871 for (i = 0; i < h->ndevices; i++) { 872 if (h->dev[i]->bus == bus && h->dev[i]->target != -1) 873 __set_bit(h->dev[i]->target, lun_taken); 874 } 875 876 i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES); 877 if (i < HPSA_MAX_DEVICES) { 878 /* *bus = 1; */ 879 *target = i; 880 *lun = 0; 881 found = 1; 882 } 883 return !found; 884 } 885 886 /* Add an entry into h->dev[] array. */ 887 static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno, 888 struct hpsa_scsi_dev_t *device, 889 struct hpsa_scsi_dev_t *added[], int *nadded) 890 { 891 /* assumes h->devlock is held */ 892 int n = h->ndevices; 893 int i; 894 unsigned char addr1[8], addr2[8]; 895 struct hpsa_scsi_dev_t *sd; 896 897 if (n >= HPSA_MAX_DEVICES) { 898 dev_err(&h->pdev->dev, "too many devices, some will be " 899 "inaccessible.\n"); 900 return -1; 901 } 902 903 /* physical devices do not have lun or target assigned until now. */ 904 if (device->lun != -1) 905 /* Logical device, lun is already assigned. */ 906 goto lun_assigned; 907 908 /* If this device a non-zero lun of a multi-lun device 909 * byte 4 of the 8-byte LUN addr will contain the logical 910 * unit no, zero otherise. 911 */ 912 if (device->scsi3addr[4] == 0) { 913 /* This is not a non-zero lun of a multi-lun device */ 914 if (hpsa_find_target_lun(h, device->scsi3addr, 915 device->bus, &device->target, &device->lun) != 0) 916 return -1; 917 goto lun_assigned; 918 } 919 920 /* This is a non-zero lun of a multi-lun device. 921 * Search through our list and find the device which 922 * has the same 8 byte LUN address, excepting byte 4. 923 * Assign the same bus and target for this new LUN. 924 * Use the logical unit number from the firmware. 925 */ 926 memcpy(addr1, device->scsi3addr, 8); 927 addr1[4] = 0; 928 for (i = 0; i < n; i++) { 929 sd = h->dev[i]; 930 memcpy(addr2, sd->scsi3addr, 8); 931 addr2[4] = 0; 932 /* differ only in byte 4? */ 933 if (memcmp(addr1, addr2, 8) == 0) { 934 device->bus = sd->bus; 935 device->target = sd->target; 936 device->lun = device->scsi3addr[4]; 937 break; 938 } 939 } 940 if (device->lun == -1) { 941 dev_warn(&h->pdev->dev, "physical device with no LUN=0," 942 " suspect firmware bug or unsupported hardware " 943 "configuration.\n"); 944 return -1; 945 } 946 947 lun_assigned: 948 949 h->dev[n] = device; 950 h->ndevices++; 951 added[*nadded] = device; 952 (*nadded)++; 953 954 /* initially, (before registering with scsi layer) we don't 955 * know our hostno and we don't want to print anything first 956 * time anyway (the scsi layer's inquiries will show that info) 957 */ 958 /* if (hostno != -1) */ 959 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n", 960 scsi_device_type(device->devtype), hostno, 961 device->bus, device->target, device->lun); 962 return 0; 963 } 964 965 /* Update an entry in h->dev[] array. */ 966 static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno, 967 int entry, struct hpsa_scsi_dev_t *new_entry) 968 { 969 /* assumes h->devlock is held */ 970 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); 971 972 /* Raid level changed. */ 973 h->dev[entry]->raid_level = new_entry->raid_level; 974 975 /* Raid offload parameters changed. */ 976 h->dev[entry]->offload_config = new_entry->offload_config; 977 h->dev[entry]->offload_enabled = new_entry->offload_enabled; 978 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; 979 h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror; 980 h->dev[entry]->raid_map = new_entry->raid_map; 981 982 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n", 983 scsi_device_type(new_entry->devtype), hostno, new_entry->bus, 984 new_entry->target, new_entry->lun); 985 } 986 987 /* Replace an entry from h->dev[] array. */ 988 static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno, 989 int entry, struct hpsa_scsi_dev_t *new_entry, 990 struct hpsa_scsi_dev_t *added[], int *nadded, 991 struct hpsa_scsi_dev_t *removed[], int *nremoved) 992 { 993 /* assumes h->devlock is held */ 994 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); 995 removed[*nremoved] = h->dev[entry]; 996 (*nremoved)++; 997 998 /* 999 * New physical devices won't have target/lun assigned yet 1000 * so we need to preserve the values in the slot we are replacing. 1001 */ 1002 if (new_entry->target == -1) { 1003 new_entry->target = h->dev[entry]->target; 1004 new_entry->lun = h->dev[entry]->lun; 1005 } 1006 1007 h->dev[entry] = new_entry; 1008 added[*nadded] = new_entry; 1009 (*nadded)++; 1010 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n", 1011 scsi_device_type(new_entry->devtype), hostno, new_entry->bus, 1012 new_entry->target, new_entry->lun); 1013 } 1014 1015 /* Remove an entry from h->dev[] array. */ 1016 static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry, 1017 struct hpsa_scsi_dev_t *removed[], int *nremoved) 1018 { 1019 /* assumes h->devlock is held */ 1020 int i; 1021 struct hpsa_scsi_dev_t *sd; 1022 1023 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); 1024 1025 sd = h->dev[entry]; 1026 removed[*nremoved] = h->dev[entry]; 1027 (*nremoved)++; 1028 1029 for (i = entry; i < h->ndevices-1; i++) 1030 h->dev[i] = h->dev[i+1]; 1031 h->ndevices--; 1032 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n", 1033 scsi_device_type(sd->devtype), hostno, sd->bus, sd->target, 1034 sd->lun); 1035 } 1036 1037 #define SCSI3ADDR_EQ(a, b) ( \ 1038 (a)[7] == (b)[7] && \ 1039 (a)[6] == (b)[6] && \ 1040 (a)[5] == (b)[5] && \ 1041 (a)[4] == (b)[4] && \ 1042 (a)[3] == (b)[3] && \ 1043 (a)[2] == (b)[2] && \ 1044 (a)[1] == (b)[1] && \ 1045 (a)[0] == (b)[0]) 1046 1047 static void fixup_botched_add(struct ctlr_info *h, 1048 struct hpsa_scsi_dev_t *added) 1049 { 1050 /* called when scsi_add_device fails in order to re-adjust 1051 * h->dev[] to match the mid layer's view. 1052 */ 1053 unsigned long flags; 1054 int i, j; 1055 1056 spin_lock_irqsave(&h->lock, flags); 1057 for (i = 0; i < h->ndevices; i++) { 1058 if (h->dev[i] == added) { 1059 for (j = i; j < h->ndevices-1; j++) 1060 h->dev[j] = h->dev[j+1]; 1061 h->ndevices--; 1062 break; 1063 } 1064 } 1065 spin_unlock_irqrestore(&h->lock, flags); 1066 kfree(added); 1067 } 1068 1069 static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1, 1070 struct hpsa_scsi_dev_t *dev2) 1071 { 1072 /* we compare everything except lun and target as these 1073 * are not yet assigned. Compare parts likely 1074 * to differ first 1075 */ 1076 if (memcmp(dev1->scsi3addr, dev2->scsi3addr, 1077 sizeof(dev1->scsi3addr)) != 0) 1078 return 0; 1079 if (memcmp(dev1->device_id, dev2->device_id, 1080 sizeof(dev1->device_id)) != 0) 1081 return 0; 1082 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0) 1083 return 0; 1084 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0) 1085 return 0; 1086 if (dev1->devtype != dev2->devtype) 1087 return 0; 1088 if (dev1->bus != dev2->bus) 1089 return 0; 1090 return 1; 1091 } 1092 1093 static inline int device_updated(struct hpsa_scsi_dev_t *dev1, 1094 struct hpsa_scsi_dev_t *dev2) 1095 { 1096 /* Device attributes that can change, but don't mean 1097 * that the device is a different device, nor that the OS 1098 * needs to be told anything about the change. 1099 */ 1100 if (dev1->raid_level != dev2->raid_level) 1101 return 1; 1102 if (dev1->offload_config != dev2->offload_config) 1103 return 1; 1104 if (dev1->offload_enabled != dev2->offload_enabled) 1105 return 1; 1106 return 0; 1107 } 1108 1109 /* Find needle in haystack. If exact match found, return DEVICE_SAME, 1110 * and return needle location in *index. If scsi3addr matches, but not 1111 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle 1112 * location in *index. 1113 * In the case of a minor device attribute change, such as RAID level, just 1114 * return DEVICE_UPDATED, along with the updated device's location in index. 1115 * If needle not found, return DEVICE_NOT_FOUND. 1116 */ 1117 static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle, 1118 struct hpsa_scsi_dev_t *haystack[], int haystack_size, 1119 int *index) 1120 { 1121 int i; 1122 #define DEVICE_NOT_FOUND 0 1123 #define DEVICE_CHANGED 1 1124 #define DEVICE_SAME 2 1125 #define DEVICE_UPDATED 3 1126 for (i = 0; i < haystack_size; i++) { 1127 if (haystack[i] == NULL) /* previously removed. */ 1128 continue; 1129 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) { 1130 *index = i; 1131 if (device_is_the_same(needle, haystack[i])) { 1132 if (device_updated(needle, haystack[i])) 1133 return DEVICE_UPDATED; 1134 return DEVICE_SAME; 1135 } else { 1136 /* Keep offline devices offline */ 1137 if (needle->volume_offline) 1138 return DEVICE_NOT_FOUND; 1139 return DEVICE_CHANGED; 1140 } 1141 } 1142 } 1143 *index = -1; 1144 return DEVICE_NOT_FOUND; 1145 } 1146 1147 static void hpsa_monitor_offline_device(struct ctlr_info *h, 1148 unsigned char scsi3addr[]) 1149 { 1150 struct offline_device_entry *device; 1151 unsigned long flags; 1152 1153 /* Check to see if device is already on the list */ 1154 spin_lock_irqsave(&h->offline_device_lock, flags); 1155 list_for_each_entry(device, &h->offline_device_list, offline_list) { 1156 if (memcmp(device->scsi3addr, scsi3addr, 1157 sizeof(device->scsi3addr)) == 0) { 1158 spin_unlock_irqrestore(&h->offline_device_lock, flags); 1159 return; 1160 } 1161 } 1162 spin_unlock_irqrestore(&h->offline_device_lock, flags); 1163 1164 /* Device is not on the list, add it. */ 1165 device = kmalloc(sizeof(*device), GFP_KERNEL); 1166 if (!device) { 1167 dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__); 1168 return; 1169 } 1170 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); 1171 spin_lock_irqsave(&h->offline_device_lock, flags); 1172 list_add_tail(&device->offline_list, &h->offline_device_list); 1173 spin_unlock_irqrestore(&h->offline_device_lock, flags); 1174 } 1175 1176 /* Print a message explaining various offline volume states */ 1177 static void hpsa_show_volume_status(struct ctlr_info *h, 1178 struct hpsa_scsi_dev_t *sd) 1179 { 1180 if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED) 1181 dev_info(&h->pdev->dev, 1182 "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n", 1183 h->scsi_host->host_no, 1184 sd->bus, sd->target, sd->lun); 1185 switch (sd->volume_offline) { 1186 case HPSA_LV_OK: 1187 break; 1188 case HPSA_LV_UNDERGOING_ERASE: 1189 dev_info(&h->pdev->dev, 1190 "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n", 1191 h->scsi_host->host_no, 1192 sd->bus, sd->target, sd->lun); 1193 break; 1194 case HPSA_LV_UNDERGOING_RPI: 1195 dev_info(&h->pdev->dev, 1196 "C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n", 1197 h->scsi_host->host_no, 1198 sd->bus, sd->target, sd->lun); 1199 break; 1200 case HPSA_LV_PENDING_RPI: 1201 dev_info(&h->pdev->dev, 1202 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n", 1203 h->scsi_host->host_no, 1204 sd->bus, sd->target, sd->lun); 1205 break; 1206 case HPSA_LV_ENCRYPTED_NO_KEY: 1207 dev_info(&h->pdev->dev, 1208 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n", 1209 h->scsi_host->host_no, 1210 sd->bus, sd->target, sd->lun); 1211 break; 1212 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: 1213 dev_info(&h->pdev->dev, 1214 "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n", 1215 h->scsi_host->host_no, 1216 sd->bus, sd->target, sd->lun); 1217 break; 1218 case HPSA_LV_UNDERGOING_ENCRYPTION: 1219 dev_info(&h->pdev->dev, 1220 "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n", 1221 h->scsi_host->host_no, 1222 sd->bus, sd->target, sd->lun); 1223 break; 1224 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING: 1225 dev_info(&h->pdev->dev, 1226 "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n", 1227 h->scsi_host->host_no, 1228 sd->bus, sd->target, sd->lun); 1229 break; 1230 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: 1231 dev_info(&h->pdev->dev, 1232 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n", 1233 h->scsi_host->host_no, 1234 sd->bus, sd->target, sd->lun); 1235 break; 1236 case HPSA_LV_PENDING_ENCRYPTION: 1237 dev_info(&h->pdev->dev, 1238 "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n", 1239 h->scsi_host->host_no, 1240 sd->bus, sd->target, sd->lun); 1241 break; 1242 case HPSA_LV_PENDING_ENCRYPTION_REKEYING: 1243 dev_info(&h->pdev->dev, 1244 "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n", 1245 h->scsi_host->host_no, 1246 sd->bus, sd->target, sd->lun); 1247 break; 1248 } 1249 } 1250 1251 static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, 1252 struct hpsa_scsi_dev_t *sd[], int nsds) 1253 { 1254 /* sd contains scsi3 addresses and devtypes, and inquiry 1255 * data. This function takes what's in sd to be the current 1256 * reality and updates h->dev[] to reflect that reality. 1257 */ 1258 int i, entry, device_change, changes = 0; 1259 struct hpsa_scsi_dev_t *csd; 1260 unsigned long flags; 1261 struct hpsa_scsi_dev_t **added, **removed; 1262 int nadded, nremoved; 1263 struct Scsi_Host *sh = NULL; 1264 1265 added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL); 1266 removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL); 1267 1268 if (!added || !removed) { 1269 dev_warn(&h->pdev->dev, "out of memory in " 1270 "adjust_hpsa_scsi_table\n"); 1271 goto free_and_out; 1272 } 1273 1274 spin_lock_irqsave(&h->devlock, flags); 1275 1276 /* find any devices in h->dev[] that are not in 1277 * sd[] and remove them from h->dev[], and for any 1278 * devices which have changed, remove the old device 1279 * info and add the new device info. 1280 * If minor device attributes change, just update 1281 * the existing device structure. 1282 */ 1283 i = 0; 1284 nremoved = 0; 1285 nadded = 0; 1286 while (i < h->ndevices) { 1287 csd = h->dev[i]; 1288 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry); 1289 if (device_change == DEVICE_NOT_FOUND) { 1290 changes++; 1291 hpsa_scsi_remove_entry(h, hostno, i, 1292 removed, &nremoved); 1293 continue; /* remove ^^^, hence i not incremented */ 1294 } else if (device_change == DEVICE_CHANGED) { 1295 changes++; 1296 hpsa_scsi_replace_entry(h, hostno, i, sd[entry], 1297 added, &nadded, removed, &nremoved); 1298 /* Set it to NULL to prevent it from being freed 1299 * at the bottom of hpsa_update_scsi_devices() 1300 */ 1301 sd[entry] = NULL; 1302 } else if (device_change == DEVICE_UPDATED) { 1303 hpsa_scsi_update_entry(h, hostno, i, sd[entry]); 1304 } 1305 i++; 1306 } 1307 1308 /* Now, make sure every device listed in sd[] is also 1309 * listed in h->dev[], adding them if they aren't found 1310 */ 1311 1312 for (i = 0; i < nsds; i++) { 1313 if (!sd[i]) /* if already added above. */ 1314 continue; 1315 1316 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS 1317 * as the SCSI mid-layer does not handle such devices well. 1318 * It relentlessly loops sending TUR at 3Hz, then READ(10) 1319 * at 160Hz, and prevents the system from coming up. 1320 */ 1321 if (sd[i]->volume_offline) { 1322 hpsa_show_volume_status(h, sd[i]); 1323 dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n", 1324 h->scsi_host->host_no, 1325 sd[i]->bus, sd[i]->target, sd[i]->lun); 1326 continue; 1327 } 1328 1329 device_change = hpsa_scsi_find_entry(sd[i], h->dev, 1330 h->ndevices, &entry); 1331 if (device_change == DEVICE_NOT_FOUND) { 1332 changes++; 1333 if (hpsa_scsi_add_entry(h, hostno, sd[i], 1334 added, &nadded) != 0) 1335 break; 1336 sd[i] = NULL; /* prevent from being freed later. */ 1337 } else if (device_change == DEVICE_CHANGED) { 1338 /* should never happen... */ 1339 changes++; 1340 dev_warn(&h->pdev->dev, 1341 "device unexpectedly changed.\n"); 1342 /* but if it does happen, we just ignore that device */ 1343 } 1344 } 1345 spin_unlock_irqrestore(&h->devlock, flags); 1346 1347 /* Monitor devices which are in one of several NOT READY states to be 1348 * brought online later. This must be done without holding h->devlock, 1349 * so don't touch h->dev[] 1350 */ 1351 for (i = 0; i < nsds; i++) { 1352 if (!sd[i]) /* if already added above. */ 1353 continue; 1354 if (sd[i]->volume_offline) 1355 hpsa_monitor_offline_device(h, sd[i]->scsi3addr); 1356 } 1357 1358 /* Don't notify scsi mid layer of any changes the first time through 1359 * (or if there are no changes) scsi_scan_host will do it later the 1360 * first time through. 1361 */ 1362 if (hostno == -1 || !changes) 1363 goto free_and_out; 1364 1365 sh = h->scsi_host; 1366 /* Notify scsi mid layer of any removed devices */ 1367 for (i = 0; i < nremoved; i++) { 1368 struct scsi_device *sdev = 1369 scsi_device_lookup(sh, removed[i]->bus, 1370 removed[i]->target, removed[i]->lun); 1371 if (sdev != NULL) { 1372 scsi_remove_device(sdev); 1373 scsi_device_put(sdev); 1374 } else { 1375 /* We don't expect to get here. 1376 * future cmds to this device will get selection 1377 * timeout as if the device was gone. 1378 */ 1379 dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d " 1380 " for removal.", hostno, removed[i]->bus, 1381 removed[i]->target, removed[i]->lun); 1382 } 1383 kfree(removed[i]); 1384 removed[i] = NULL; 1385 } 1386 1387 /* Notify scsi mid layer of any added devices */ 1388 for (i = 0; i < nadded; i++) { 1389 if (scsi_add_device(sh, added[i]->bus, 1390 added[i]->target, added[i]->lun) == 0) 1391 continue; 1392 dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, " 1393 "device not added.\n", hostno, added[i]->bus, 1394 added[i]->target, added[i]->lun); 1395 /* now we have to remove it from h->dev, 1396 * since it didn't get added to scsi mid layer 1397 */ 1398 fixup_botched_add(h, added[i]); 1399 } 1400 1401 free_and_out: 1402 kfree(added); 1403 kfree(removed); 1404 } 1405 1406 /* 1407 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t * 1408 * Assume's h->devlock is held. 1409 */ 1410 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h, 1411 int bus, int target, int lun) 1412 { 1413 int i; 1414 struct hpsa_scsi_dev_t *sd; 1415 1416 for (i = 0; i < h->ndevices; i++) { 1417 sd = h->dev[i]; 1418 if (sd->bus == bus && sd->target == target && sd->lun == lun) 1419 return sd; 1420 } 1421 return NULL; 1422 } 1423 1424 /* link sdev->hostdata to our per-device structure. */ 1425 static int hpsa_slave_alloc(struct scsi_device *sdev) 1426 { 1427 struct hpsa_scsi_dev_t *sd; 1428 unsigned long flags; 1429 struct ctlr_info *h; 1430 1431 h = sdev_to_hba(sdev); 1432 spin_lock_irqsave(&h->devlock, flags); 1433 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), 1434 sdev_id(sdev), sdev->lun); 1435 if (sd != NULL) 1436 sdev->hostdata = sd; 1437 spin_unlock_irqrestore(&h->devlock, flags); 1438 return 0; 1439 } 1440 1441 static void hpsa_slave_destroy(struct scsi_device *sdev) 1442 { 1443 /* nothing to do. */ 1444 } 1445 1446 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h) 1447 { 1448 int i; 1449 1450 if (!h->cmd_sg_list) 1451 return; 1452 for (i = 0; i < h->nr_cmds; i++) { 1453 kfree(h->cmd_sg_list[i]); 1454 h->cmd_sg_list[i] = NULL; 1455 } 1456 kfree(h->cmd_sg_list); 1457 h->cmd_sg_list = NULL; 1458 } 1459 1460 static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h) 1461 { 1462 int i; 1463 1464 if (h->chainsize <= 0) 1465 return 0; 1466 1467 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds, 1468 GFP_KERNEL); 1469 if (!h->cmd_sg_list) 1470 return -ENOMEM; 1471 for (i = 0; i < h->nr_cmds; i++) { 1472 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) * 1473 h->chainsize, GFP_KERNEL); 1474 if (!h->cmd_sg_list[i]) 1475 goto clean; 1476 } 1477 return 0; 1478 1479 clean: 1480 hpsa_free_sg_chain_blocks(h); 1481 return -ENOMEM; 1482 } 1483 1484 static int hpsa_map_sg_chain_block(struct ctlr_info *h, 1485 struct CommandList *c) 1486 { 1487 struct SGDescriptor *chain_sg, *chain_block; 1488 u64 temp64; 1489 1490 chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; 1491 chain_block = h->cmd_sg_list[c->cmdindex]; 1492 chain_sg->Ext = HPSA_SG_CHAIN; 1493 chain_sg->Len = sizeof(*chain_sg) * 1494 (c->Header.SGTotal - h->max_cmd_sg_entries); 1495 temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len, 1496 PCI_DMA_TODEVICE); 1497 if (dma_mapping_error(&h->pdev->dev, temp64)) { 1498 /* prevent subsequent unmapping */ 1499 chain_sg->Addr.lower = 0; 1500 chain_sg->Addr.upper = 0; 1501 return -1; 1502 } 1503 chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL); 1504 chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL); 1505 return 0; 1506 } 1507 1508 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h, 1509 struct CommandList *c) 1510 { 1511 struct SGDescriptor *chain_sg; 1512 union u64bit temp64; 1513 1514 if (c->Header.SGTotal <= h->max_cmd_sg_entries) 1515 return; 1516 1517 chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; 1518 temp64.val32.lower = chain_sg->Addr.lower; 1519 temp64.val32.upper = chain_sg->Addr.upper; 1520 pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE); 1521 } 1522 1523 1524 /* Decode the various types of errors on ioaccel2 path. 1525 * Return 1 for any error that should generate a RAID path retry. 1526 * Return 0 for errors that don't require a RAID path retry. 1527 */ 1528 static int handle_ioaccel_mode2_error(struct ctlr_info *h, 1529 struct CommandList *c, 1530 struct scsi_cmnd *cmd, 1531 struct io_accel2_cmd *c2) 1532 { 1533 int data_len; 1534 int retry = 0; 1535 1536 switch (c2->error_data.serv_response) { 1537 case IOACCEL2_SERV_RESPONSE_COMPLETE: 1538 switch (c2->error_data.status) { 1539 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: 1540 break; 1541 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: 1542 dev_warn(&h->pdev->dev, 1543 "%s: task complete with check condition.\n", 1544 "HP SSD Smart Path"); 1545 if (c2->error_data.data_present != 1546 IOACCEL2_SENSE_DATA_PRESENT) 1547 break; 1548 /* copy the sense data */ 1549 data_len = c2->error_data.sense_data_len; 1550 if (data_len > SCSI_SENSE_BUFFERSIZE) 1551 data_len = SCSI_SENSE_BUFFERSIZE; 1552 if (data_len > sizeof(c2->error_data.sense_data_buff)) 1553 data_len = 1554 sizeof(c2->error_data.sense_data_buff); 1555 memcpy(cmd->sense_buffer, 1556 c2->error_data.sense_data_buff, data_len); 1557 cmd->result |= SAM_STAT_CHECK_CONDITION; 1558 retry = 1; 1559 break; 1560 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY: 1561 dev_warn(&h->pdev->dev, 1562 "%s: task complete with BUSY status.\n", 1563 "HP SSD Smart Path"); 1564 retry = 1; 1565 break; 1566 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON: 1567 dev_warn(&h->pdev->dev, 1568 "%s: task complete with reservation conflict.\n", 1569 "HP SSD Smart Path"); 1570 retry = 1; 1571 break; 1572 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL: 1573 /* Make scsi midlayer do unlimited retries */ 1574 cmd->result = DID_IMM_RETRY << 16; 1575 break; 1576 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED: 1577 dev_warn(&h->pdev->dev, 1578 "%s: task complete with aborted status.\n", 1579 "HP SSD Smart Path"); 1580 retry = 1; 1581 break; 1582 default: 1583 dev_warn(&h->pdev->dev, 1584 "%s: task complete with unrecognized status: 0x%02x\n", 1585 "HP SSD Smart Path", c2->error_data.status); 1586 retry = 1; 1587 break; 1588 } 1589 break; 1590 case IOACCEL2_SERV_RESPONSE_FAILURE: 1591 /* don't expect to get here. */ 1592 dev_warn(&h->pdev->dev, 1593 "unexpected delivery or target failure, status = 0x%02x\n", 1594 c2->error_data.status); 1595 retry = 1; 1596 break; 1597 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE: 1598 break; 1599 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS: 1600 break; 1601 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED: 1602 dev_warn(&h->pdev->dev, "task management function rejected.\n"); 1603 retry = 1; 1604 break; 1605 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN: 1606 dev_warn(&h->pdev->dev, "task management function invalid LUN\n"); 1607 break; 1608 default: 1609 dev_warn(&h->pdev->dev, 1610 "%s: Unrecognized server response: 0x%02x\n", 1611 "HP SSD Smart Path", 1612 c2->error_data.serv_response); 1613 retry = 1; 1614 break; 1615 } 1616 1617 return retry; /* retry on raid path? */ 1618 } 1619 1620 static void process_ioaccel2_completion(struct ctlr_info *h, 1621 struct CommandList *c, struct scsi_cmnd *cmd, 1622 struct hpsa_scsi_dev_t *dev) 1623 { 1624 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; 1625 int raid_retry = 0; 1626 1627 /* check for good status */ 1628 if (likely(c2->error_data.serv_response == 0 && 1629 c2->error_data.status == 0)) { 1630 cmd_free(h, c); 1631 cmd->scsi_done(cmd); 1632 return; 1633 } 1634 1635 /* Any RAID offload error results in retry which will use 1636 * the normal I/O path so the controller can handle whatever's 1637 * wrong. 1638 */ 1639 if (is_logical_dev_addr_mode(dev->scsi3addr) && 1640 c2->error_data.serv_response == 1641 IOACCEL2_SERV_RESPONSE_FAILURE) { 1642 if (c2->error_data.status == 1643 IOACCEL2_STATUS_SR_IOACCEL_DISABLED) 1644 dev_warn(&h->pdev->dev, 1645 "%s: Path is unavailable, retrying on standard path.\n", 1646 "HP SSD Smart Path"); 1647 else 1648 dev_warn(&h->pdev->dev, 1649 "%s: Error 0x%02x, retrying on standard path.\n", 1650 "HP SSD Smart Path", c2->error_data.status); 1651 1652 dev->offload_enabled = 0; 1653 h->drv_req_rescan = 1; /* schedule controller for a rescan */ 1654 cmd->result = DID_SOFT_ERROR << 16; 1655 cmd_free(h, c); 1656 cmd->scsi_done(cmd); 1657 return; 1658 } 1659 raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2); 1660 /* If error found, disable Smart Path, schedule a rescan, 1661 * and force a retry on the standard path. 1662 */ 1663 if (raid_retry) { 1664 dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n", 1665 "HP SSD Smart Path"); 1666 dev->offload_enabled = 0; /* Disable Smart Path */ 1667 h->drv_req_rescan = 1; /* schedule controller rescan */ 1668 cmd->result = DID_SOFT_ERROR << 16; 1669 } 1670 cmd_free(h, c); 1671 cmd->scsi_done(cmd); 1672 } 1673 1674 static void complete_scsi_command(struct CommandList *cp) 1675 { 1676 struct scsi_cmnd *cmd; 1677 struct ctlr_info *h; 1678 struct ErrorInfo *ei; 1679 struct hpsa_scsi_dev_t *dev; 1680 1681 unsigned char sense_key; 1682 unsigned char asc; /* additional sense code */ 1683 unsigned char ascq; /* additional sense code qualifier */ 1684 unsigned long sense_data_size; 1685 1686 ei = cp->err_info; 1687 cmd = (struct scsi_cmnd *) cp->scsi_cmd; 1688 h = cp->h; 1689 dev = cmd->device->hostdata; 1690 1691 scsi_dma_unmap(cmd); /* undo the DMA mappings */ 1692 if ((cp->cmd_type == CMD_SCSI) && 1693 (cp->Header.SGTotal > h->max_cmd_sg_entries)) 1694 hpsa_unmap_sg_chain_block(h, cp); 1695 1696 cmd->result = (DID_OK << 16); /* host byte */ 1697 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ 1698 1699 if (cp->cmd_type == CMD_IOACCEL2) 1700 return process_ioaccel2_completion(h, cp, cmd, dev); 1701 1702 cmd->result |= ei->ScsiStatus; 1703 1704 /* copy the sense data whether we need to or not. */ 1705 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo)) 1706 sense_data_size = SCSI_SENSE_BUFFERSIZE; 1707 else 1708 sense_data_size = sizeof(ei->SenseInfo); 1709 if (ei->SenseLen < sense_data_size) 1710 sense_data_size = ei->SenseLen; 1711 1712 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size); 1713 scsi_set_resid(cmd, ei->ResidualCnt); 1714 1715 if (ei->CommandStatus == 0) { 1716 cmd_free(h, cp); 1717 cmd->scsi_done(cmd); 1718 return; 1719 } 1720 1721 /* For I/O accelerator commands, copy over some fields to the normal 1722 * CISS header used below for error handling. 1723 */ 1724 if (cp->cmd_type == CMD_IOACCEL1) { 1725 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; 1726 cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd); 1727 cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK; 1728 cp->Header.Tag.lower = c->Tag.lower; 1729 cp->Header.Tag.upper = c->Tag.upper; 1730 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8); 1731 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen); 1732 1733 /* Any RAID offload error results in retry which will use 1734 * the normal I/O path so the controller can handle whatever's 1735 * wrong. 1736 */ 1737 if (is_logical_dev_addr_mode(dev->scsi3addr)) { 1738 if (ei->CommandStatus == CMD_IOACCEL_DISABLED) 1739 dev->offload_enabled = 0; 1740 cmd->result = DID_SOFT_ERROR << 16; 1741 cmd_free(h, cp); 1742 cmd->scsi_done(cmd); 1743 return; 1744 } 1745 } 1746 1747 /* an error has occurred */ 1748 switch (ei->CommandStatus) { 1749 1750 case CMD_TARGET_STATUS: 1751 if (ei->ScsiStatus) { 1752 /* Get sense key */ 1753 sense_key = 0xf & ei->SenseInfo[2]; 1754 /* Get additional sense code */ 1755 asc = ei->SenseInfo[12]; 1756 /* Get addition sense code qualifier */ 1757 ascq = ei->SenseInfo[13]; 1758 } 1759 1760 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { 1761 if (check_for_unit_attention(h, cp)) 1762 break; 1763 if (sense_key == ILLEGAL_REQUEST) { 1764 /* 1765 * SCSI REPORT_LUNS is commonly unsupported on 1766 * Smart Array. Suppress noisy complaint. 1767 */ 1768 if (cp->Request.CDB[0] == REPORT_LUNS) 1769 break; 1770 1771 /* If ASC/ASCQ indicate Logical Unit 1772 * Not Supported condition, 1773 */ 1774 if ((asc == 0x25) && (ascq == 0x0)) { 1775 dev_warn(&h->pdev->dev, "cp %p " 1776 "has check condition\n", cp); 1777 break; 1778 } 1779 } 1780 1781 if (sense_key == NOT_READY) { 1782 /* If Sense is Not Ready, Logical Unit 1783 * Not ready, Manual Intervention 1784 * required 1785 */ 1786 if ((asc == 0x04) && (ascq == 0x03)) { 1787 dev_warn(&h->pdev->dev, "cp %p " 1788 "has check condition: unit " 1789 "not ready, manual " 1790 "intervention required\n", cp); 1791 break; 1792 } 1793 } 1794 if (sense_key == ABORTED_COMMAND) { 1795 /* Aborted command is retryable */ 1796 dev_warn(&h->pdev->dev, "cp %p " 1797 "has check condition: aborted command: " 1798 "ASC: 0x%x, ASCQ: 0x%x\n", 1799 cp, asc, ascq); 1800 cmd->result |= DID_SOFT_ERROR << 16; 1801 break; 1802 } 1803 /* Must be some other type of check condition */ 1804 dev_dbg(&h->pdev->dev, "cp %p has check condition: " 1805 "unknown type: " 1806 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " 1807 "Returning result: 0x%x, " 1808 "cmd=[%02x %02x %02x %02x %02x " 1809 "%02x %02x %02x %02x %02x %02x " 1810 "%02x %02x %02x %02x %02x]\n", 1811 cp, sense_key, asc, ascq, 1812 cmd->result, 1813 cmd->cmnd[0], cmd->cmnd[1], 1814 cmd->cmnd[2], cmd->cmnd[3], 1815 cmd->cmnd[4], cmd->cmnd[5], 1816 cmd->cmnd[6], cmd->cmnd[7], 1817 cmd->cmnd[8], cmd->cmnd[9], 1818 cmd->cmnd[10], cmd->cmnd[11], 1819 cmd->cmnd[12], cmd->cmnd[13], 1820 cmd->cmnd[14], cmd->cmnd[15]); 1821 break; 1822 } 1823 1824 1825 /* Problem was not a check condition 1826 * Pass it up to the upper layers... 1827 */ 1828 if (ei->ScsiStatus) { 1829 dev_warn(&h->pdev->dev, "cp %p has status 0x%x " 1830 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " 1831 "Returning result: 0x%x\n", 1832 cp, ei->ScsiStatus, 1833 sense_key, asc, ascq, 1834 cmd->result); 1835 } else { /* scsi status is zero??? How??? */ 1836 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. " 1837 "Returning no connection.\n", cp), 1838 1839 /* Ordinarily, this case should never happen, 1840 * but there is a bug in some released firmware 1841 * revisions that allows it to happen if, for 1842 * example, a 4100 backplane loses power and 1843 * the tape drive is in it. We assume that 1844 * it's a fatal error of some kind because we 1845 * can't show that it wasn't. We will make it 1846 * look like selection timeout since that is 1847 * the most common reason for this to occur, 1848 * and it's severe enough. 1849 */ 1850 1851 cmd->result = DID_NO_CONNECT << 16; 1852 } 1853 break; 1854 1855 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ 1856 break; 1857 case CMD_DATA_OVERRUN: 1858 dev_warn(&h->pdev->dev, "cp %p has" 1859 " completed with data overrun " 1860 "reported\n", cp); 1861 break; 1862 case CMD_INVALID: { 1863 /* print_bytes(cp, sizeof(*cp), 1, 0); 1864 print_cmd(cp); */ 1865 /* We get CMD_INVALID if you address a non-existent device 1866 * instead of a selection timeout (no response). You will 1867 * see this if you yank out a drive, then try to access it. 1868 * This is kind of a shame because it means that any other 1869 * CMD_INVALID (e.g. driver bug) will get interpreted as a 1870 * missing target. */ 1871 cmd->result = DID_NO_CONNECT << 16; 1872 } 1873 break; 1874 case CMD_PROTOCOL_ERR: 1875 cmd->result = DID_ERROR << 16; 1876 dev_warn(&h->pdev->dev, "cp %p has " 1877 "protocol error\n", cp); 1878 break; 1879 case CMD_HARDWARE_ERR: 1880 cmd->result = DID_ERROR << 16; 1881 dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp); 1882 break; 1883 case CMD_CONNECTION_LOST: 1884 cmd->result = DID_ERROR << 16; 1885 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp); 1886 break; 1887 case CMD_ABORTED: 1888 cmd->result = DID_ABORT << 16; 1889 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n", 1890 cp, ei->ScsiStatus); 1891 break; 1892 case CMD_ABORT_FAILED: 1893 cmd->result = DID_ERROR << 16; 1894 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp); 1895 break; 1896 case CMD_UNSOLICITED_ABORT: 1897 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */ 1898 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited " 1899 "abort\n", cp); 1900 break; 1901 case CMD_TIMEOUT: 1902 cmd->result = DID_TIME_OUT << 16; 1903 dev_warn(&h->pdev->dev, "cp %p timedout\n", cp); 1904 break; 1905 case CMD_UNABORTABLE: 1906 cmd->result = DID_ERROR << 16; 1907 dev_warn(&h->pdev->dev, "Command unabortable\n"); 1908 break; 1909 case CMD_IOACCEL_DISABLED: 1910 /* This only handles the direct pass-through case since RAID 1911 * offload is handled above. Just attempt a retry. 1912 */ 1913 cmd->result = DID_SOFT_ERROR << 16; 1914 dev_warn(&h->pdev->dev, 1915 "cp %p had HP SSD Smart Path error\n", cp); 1916 break; 1917 default: 1918 cmd->result = DID_ERROR << 16; 1919 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", 1920 cp, ei->CommandStatus); 1921 } 1922 cmd_free(h, cp); 1923 cmd->scsi_done(cmd); 1924 } 1925 1926 static void hpsa_pci_unmap(struct pci_dev *pdev, 1927 struct CommandList *c, int sg_used, int data_direction) 1928 { 1929 int i; 1930 union u64bit addr64; 1931 1932 for (i = 0; i < sg_used; i++) { 1933 addr64.val32.lower = c->SG[i].Addr.lower; 1934 addr64.val32.upper = c->SG[i].Addr.upper; 1935 pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len, 1936 data_direction); 1937 } 1938 } 1939 1940 static int hpsa_map_one(struct pci_dev *pdev, 1941 struct CommandList *cp, 1942 unsigned char *buf, 1943 size_t buflen, 1944 int data_direction) 1945 { 1946 u64 addr64; 1947 1948 if (buflen == 0 || data_direction == PCI_DMA_NONE) { 1949 cp->Header.SGList = 0; 1950 cp->Header.SGTotal = 0; 1951 return 0; 1952 } 1953 1954 addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction); 1955 if (dma_mapping_error(&pdev->dev, addr64)) { 1956 /* Prevent subsequent unmap of something never mapped */ 1957 cp->Header.SGList = 0; 1958 cp->Header.SGTotal = 0; 1959 return -1; 1960 } 1961 cp->SG[0].Addr.lower = 1962 (u32) (addr64 & (u64) 0x00000000FFFFFFFF); 1963 cp->SG[0].Addr.upper = 1964 (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF); 1965 cp->SG[0].Len = buflen; 1966 cp->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */ 1967 cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */ 1968 cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */ 1969 return 0; 1970 } 1971 1972 static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, 1973 struct CommandList *c) 1974 { 1975 DECLARE_COMPLETION_ONSTACK(wait); 1976 1977 c->waiting = &wait; 1978 enqueue_cmd_and_start_io(h, c); 1979 wait_for_completion(&wait); 1980 } 1981 1982 static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h, 1983 struct CommandList *c) 1984 { 1985 unsigned long flags; 1986 1987 /* If controller lockup detected, fake a hardware error. */ 1988 spin_lock_irqsave(&h->lock, flags); 1989 if (unlikely(h->lockup_detected)) { 1990 spin_unlock_irqrestore(&h->lock, flags); 1991 c->err_info->CommandStatus = CMD_HARDWARE_ERR; 1992 } else { 1993 spin_unlock_irqrestore(&h->lock, flags); 1994 hpsa_scsi_do_simple_cmd_core(h, c); 1995 } 1996 } 1997 1998 #define MAX_DRIVER_CMD_RETRIES 25 1999 static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, 2000 struct CommandList *c, int data_direction) 2001 { 2002 int backoff_time = 10, retry_count = 0; 2003 2004 do { 2005 memset(c->err_info, 0, sizeof(*c->err_info)); 2006 hpsa_scsi_do_simple_cmd_core(h, c); 2007 retry_count++; 2008 if (retry_count > 3) { 2009 msleep(backoff_time); 2010 if (backoff_time < 1000) 2011 backoff_time *= 2; 2012 } 2013 } while ((check_for_unit_attention(h, c) || 2014 check_for_busy(h, c)) && 2015 retry_count <= MAX_DRIVER_CMD_RETRIES); 2016 hpsa_pci_unmap(h->pdev, c, 1, data_direction); 2017 } 2018 2019 static void hpsa_print_cmd(struct ctlr_info *h, char *txt, 2020 struct CommandList *c) 2021 { 2022 const u8 *cdb = c->Request.CDB; 2023 const u8 *lun = c->Header.LUN.LunAddrBytes; 2024 2025 dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x" 2026 " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", 2027 txt, lun[0], lun[1], lun[2], lun[3], 2028 lun[4], lun[5], lun[6], lun[7], 2029 cdb[0], cdb[1], cdb[2], cdb[3], 2030 cdb[4], cdb[5], cdb[6], cdb[7], 2031 cdb[8], cdb[9], cdb[10], cdb[11], 2032 cdb[12], cdb[13], cdb[14], cdb[15]); 2033 } 2034 2035 static void hpsa_scsi_interpret_error(struct ctlr_info *h, 2036 struct CommandList *cp) 2037 { 2038 const struct ErrorInfo *ei = cp->err_info; 2039 struct device *d = &cp->h->pdev->dev; 2040 const u8 *sd = ei->SenseInfo; 2041 2042 switch (ei->CommandStatus) { 2043 case CMD_TARGET_STATUS: 2044 hpsa_print_cmd(h, "SCSI status", cp); 2045 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) 2046 dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n", 2047 sd[2] & 0x0f, sd[12], sd[13]); 2048 else 2049 dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus); 2050 if (ei->ScsiStatus == 0) 2051 dev_warn(d, "SCSI status is abnormally zero. " 2052 "(probably indicates selection timeout " 2053 "reported incorrectly due to a known " 2054 "firmware bug, circa July, 2001.)\n"); 2055 break; 2056 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ 2057 break; 2058 case CMD_DATA_OVERRUN: 2059 hpsa_print_cmd(h, "overrun condition", cp); 2060 break; 2061 case CMD_INVALID: { 2062 /* controller unfortunately reports SCSI passthru's 2063 * to non-existent targets as invalid commands. 2064 */ 2065 hpsa_print_cmd(h, "invalid command", cp); 2066 dev_warn(d, "probably means device no longer present\n"); 2067 } 2068 break; 2069 case CMD_PROTOCOL_ERR: 2070 hpsa_print_cmd(h, "protocol error", cp); 2071 break; 2072 case CMD_HARDWARE_ERR: 2073 hpsa_print_cmd(h, "hardware error", cp); 2074 break; 2075 case CMD_CONNECTION_LOST: 2076 hpsa_print_cmd(h, "connection lost", cp); 2077 break; 2078 case CMD_ABORTED: 2079 hpsa_print_cmd(h, "aborted", cp); 2080 break; 2081 case CMD_ABORT_FAILED: 2082 hpsa_print_cmd(h, "abort failed", cp); 2083 break; 2084 case CMD_UNSOLICITED_ABORT: 2085 hpsa_print_cmd(h, "unsolicited abort", cp); 2086 break; 2087 case CMD_TIMEOUT: 2088 hpsa_print_cmd(h, "timed out", cp); 2089 break; 2090 case CMD_UNABORTABLE: 2091 hpsa_print_cmd(h, "unabortable", cp); 2092 break; 2093 default: 2094 hpsa_print_cmd(h, "unknown status", cp); 2095 dev_warn(d, "Unknown command status %x\n", 2096 ei->CommandStatus); 2097 } 2098 } 2099 2100 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, 2101 u16 page, unsigned char *buf, 2102 unsigned char bufsize) 2103 { 2104 int rc = IO_OK; 2105 struct CommandList *c; 2106 struct ErrorInfo *ei; 2107 2108 c = cmd_special_alloc(h); 2109 2110 if (c == NULL) { /* trouble... */ 2111 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 2112 return -ENOMEM; 2113 } 2114 2115 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, 2116 page, scsi3addr, TYPE_CMD)) { 2117 rc = -1; 2118 goto out; 2119 } 2120 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 2121 ei = c->err_info; 2122 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 2123 hpsa_scsi_interpret_error(h, c); 2124 rc = -1; 2125 } 2126 out: 2127 cmd_special_free(h, c); 2128 return rc; 2129 } 2130 2131 static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h, 2132 unsigned char *scsi3addr, unsigned char page, 2133 struct bmic_controller_parameters *buf, size_t bufsize) 2134 { 2135 int rc = IO_OK; 2136 struct CommandList *c; 2137 struct ErrorInfo *ei; 2138 2139 c = cmd_special_alloc(h); 2140 2141 if (c == NULL) { /* trouble... */ 2142 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 2143 return -ENOMEM; 2144 } 2145 2146 if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize, 2147 page, scsi3addr, TYPE_CMD)) { 2148 rc = -1; 2149 goto out; 2150 } 2151 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 2152 ei = c->err_info; 2153 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 2154 hpsa_scsi_interpret_error(h, c); 2155 rc = -1; 2156 } 2157 out: 2158 cmd_special_free(h, c); 2159 return rc; 2160 } 2161 2162 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, 2163 u8 reset_type) 2164 { 2165 int rc = IO_OK; 2166 struct CommandList *c; 2167 struct ErrorInfo *ei; 2168 2169 c = cmd_special_alloc(h); 2170 2171 if (c == NULL) { /* trouble... */ 2172 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 2173 return -ENOMEM; 2174 } 2175 2176 /* fill_cmd can't fail here, no data buffer to map. */ 2177 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, 2178 scsi3addr, TYPE_MSG); 2179 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */ 2180 hpsa_scsi_do_simple_cmd_core(h, c); 2181 /* no unmap needed here because no data xfer. */ 2182 2183 ei = c->err_info; 2184 if (ei->CommandStatus != 0) { 2185 hpsa_scsi_interpret_error(h, c); 2186 rc = -1; 2187 } 2188 cmd_special_free(h, c); 2189 return rc; 2190 } 2191 2192 static void hpsa_get_raid_level(struct ctlr_info *h, 2193 unsigned char *scsi3addr, unsigned char *raid_level) 2194 { 2195 int rc; 2196 unsigned char *buf; 2197 2198 *raid_level = RAID_UNKNOWN; 2199 buf = kzalloc(64, GFP_KERNEL); 2200 if (!buf) 2201 return; 2202 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64); 2203 if (rc == 0) 2204 *raid_level = buf[8]; 2205 if (*raid_level > RAID_UNKNOWN) 2206 *raid_level = RAID_UNKNOWN; 2207 kfree(buf); 2208 return; 2209 } 2210 2211 #define HPSA_MAP_DEBUG 2212 #ifdef HPSA_MAP_DEBUG 2213 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc, 2214 struct raid_map_data *map_buff) 2215 { 2216 struct raid_map_disk_data *dd = &map_buff->data[0]; 2217 int map, row, col; 2218 u16 map_cnt, row_cnt, disks_per_row; 2219 2220 if (rc != 0) 2221 return; 2222 2223 /* Show details only if debugging has been activated. */ 2224 if (h->raid_offload_debug < 2) 2225 return; 2226 2227 dev_info(&h->pdev->dev, "structure_size = %u\n", 2228 le32_to_cpu(map_buff->structure_size)); 2229 dev_info(&h->pdev->dev, "volume_blk_size = %u\n", 2230 le32_to_cpu(map_buff->volume_blk_size)); 2231 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n", 2232 le64_to_cpu(map_buff->volume_blk_cnt)); 2233 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n", 2234 map_buff->phys_blk_shift); 2235 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n", 2236 map_buff->parity_rotation_shift); 2237 dev_info(&h->pdev->dev, "strip_size = %u\n", 2238 le16_to_cpu(map_buff->strip_size)); 2239 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n", 2240 le64_to_cpu(map_buff->disk_starting_blk)); 2241 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n", 2242 le64_to_cpu(map_buff->disk_blk_cnt)); 2243 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n", 2244 le16_to_cpu(map_buff->data_disks_per_row)); 2245 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n", 2246 le16_to_cpu(map_buff->metadata_disks_per_row)); 2247 dev_info(&h->pdev->dev, "row_cnt = %u\n", 2248 le16_to_cpu(map_buff->row_cnt)); 2249 dev_info(&h->pdev->dev, "layout_map_count = %u\n", 2250 le16_to_cpu(map_buff->layout_map_count)); 2251 dev_info(&h->pdev->dev, "flags = %u\n", 2252 le16_to_cpu(map_buff->flags)); 2253 if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON) 2254 dev_info(&h->pdev->dev, "encrypytion = ON\n"); 2255 else 2256 dev_info(&h->pdev->dev, "encrypytion = OFF\n"); 2257 dev_info(&h->pdev->dev, "dekindex = %u\n", 2258 le16_to_cpu(map_buff->dekindex)); 2259 2260 map_cnt = le16_to_cpu(map_buff->layout_map_count); 2261 for (map = 0; map < map_cnt; map++) { 2262 dev_info(&h->pdev->dev, "Map%u:\n", map); 2263 row_cnt = le16_to_cpu(map_buff->row_cnt); 2264 for (row = 0; row < row_cnt; row++) { 2265 dev_info(&h->pdev->dev, " Row%u:\n", row); 2266 disks_per_row = 2267 le16_to_cpu(map_buff->data_disks_per_row); 2268 for (col = 0; col < disks_per_row; col++, dd++) 2269 dev_info(&h->pdev->dev, 2270 " D%02u: h=0x%04x xor=%u,%u\n", 2271 col, dd->ioaccel_handle, 2272 dd->xor_mult[0], dd->xor_mult[1]); 2273 disks_per_row = 2274 le16_to_cpu(map_buff->metadata_disks_per_row); 2275 for (col = 0; col < disks_per_row; col++, dd++) 2276 dev_info(&h->pdev->dev, 2277 " M%02u: h=0x%04x xor=%u,%u\n", 2278 col, dd->ioaccel_handle, 2279 dd->xor_mult[0], dd->xor_mult[1]); 2280 } 2281 } 2282 } 2283 #else 2284 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h, 2285 __attribute__((unused)) int rc, 2286 __attribute__((unused)) struct raid_map_data *map_buff) 2287 { 2288 } 2289 #endif 2290 2291 static int hpsa_get_raid_map(struct ctlr_info *h, 2292 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) 2293 { 2294 int rc = 0; 2295 struct CommandList *c; 2296 struct ErrorInfo *ei; 2297 2298 c = cmd_special_alloc(h); 2299 if (c == NULL) { 2300 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 2301 return -ENOMEM; 2302 } 2303 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, 2304 sizeof(this_device->raid_map), 0, 2305 scsi3addr, TYPE_CMD)) { 2306 dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n"); 2307 cmd_special_free(h, c); 2308 return -ENOMEM; 2309 } 2310 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 2311 ei = c->err_info; 2312 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 2313 hpsa_scsi_interpret_error(h, c); 2314 cmd_special_free(h, c); 2315 return -1; 2316 } 2317 cmd_special_free(h, c); 2318 2319 /* @todo in the future, dynamically allocate RAID map memory */ 2320 if (le32_to_cpu(this_device->raid_map.structure_size) > 2321 sizeof(this_device->raid_map)) { 2322 dev_warn(&h->pdev->dev, "RAID map size is too large!\n"); 2323 rc = -1; 2324 } 2325 hpsa_debug_map_buff(h, rc, &this_device->raid_map); 2326 return rc; 2327 } 2328 2329 static int hpsa_vpd_page_supported(struct ctlr_info *h, 2330 unsigned char scsi3addr[], u8 page) 2331 { 2332 int rc; 2333 int i; 2334 int pages; 2335 unsigned char *buf, bufsize; 2336 2337 buf = kzalloc(256, GFP_KERNEL); 2338 if (!buf) 2339 return 0; 2340 2341 /* Get the size of the page list first */ 2342 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 2343 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, 2344 buf, HPSA_VPD_HEADER_SZ); 2345 if (rc != 0) 2346 goto exit_unsupported; 2347 pages = buf[3]; 2348 if ((pages + HPSA_VPD_HEADER_SZ) <= 255) 2349 bufsize = pages + HPSA_VPD_HEADER_SZ; 2350 else 2351 bufsize = 255; 2352 2353 /* Get the whole VPD page list */ 2354 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 2355 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, 2356 buf, bufsize); 2357 if (rc != 0) 2358 goto exit_unsupported; 2359 2360 pages = buf[3]; 2361 for (i = 1; i <= pages; i++) 2362 if (buf[3 + i] == page) 2363 goto exit_supported; 2364 exit_unsupported: 2365 kfree(buf); 2366 return 0; 2367 exit_supported: 2368 kfree(buf); 2369 return 1; 2370 } 2371 2372 static void hpsa_get_ioaccel_status(struct ctlr_info *h, 2373 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) 2374 { 2375 int rc; 2376 unsigned char *buf; 2377 u8 ioaccel_status; 2378 2379 this_device->offload_config = 0; 2380 this_device->offload_enabled = 0; 2381 2382 buf = kzalloc(64, GFP_KERNEL); 2383 if (!buf) 2384 return; 2385 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS)) 2386 goto out; 2387 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 2388 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64); 2389 if (rc != 0) 2390 goto out; 2391 2392 #define IOACCEL_STATUS_BYTE 4 2393 #define OFFLOAD_CONFIGURED_BIT 0x01 2394 #define OFFLOAD_ENABLED_BIT 0x02 2395 ioaccel_status = buf[IOACCEL_STATUS_BYTE]; 2396 this_device->offload_config = 2397 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); 2398 if (this_device->offload_config) { 2399 this_device->offload_enabled = 2400 !!(ioaccel_status & OFFLOAD_ENABLED_BIT); 2401 if (hpsa_get_raid_map(h, scsi3addr, this_device)) 2402 this_device->offload_enabled = 0; 2403 } 2404 out: 2405 kfree(buf); 2406 return; 2407 } 2408 2409 /* Get the device id from inquiry page 0x83 */ 2410 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr, 2411 unsigned char *device_id, int buflen) 2412 { 2413 int rc; 2414 unsigned char *buf; 2415 2416 if (buflen > 16) 2417 buflen = 16; 2418 buf = kzalloc(64, GFP_KERNEL); 2419 if (!buf) 2420 return -1; 2421 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64); 2422 if (rc == 0) 2423 memcpy(device_id, &buf[8], buflen); 2424 kfree(buf); 2425 return rc != 0; 2426 } 2427 2428 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, 2429 struct ReportLUNdata *buf, int bufsize, 2430 int extended_response) 2431 { 2432 int rc = IO_OK; 2433 struct CommandList *c; 2434 unsigned char scsi3addr[8]; 2435 struct ErrorInfo *ei; 2436 2437 c = cmd_special_alloc(h); 2438 if (c == NULL) { /* trouble... */ 2439 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 2440 return -1; 2441 } 2442 /* address the controller */ 2443 memset(scsi3addr, 0, sizeof(scsi3addr)); 2444 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, 2445 buf, bufsize, 0, scsi3addr, TYPE_CMD)) { 2446 rc = -1; 2447 goto out; 2448 } 2449 if (extended_response) 2450 c->Request.CDB[1] = extended_response; 2451 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 2452 ei = c->err_info; 2453 if (ei->CommandStatus != 0 && 2454 ei->CommandStatus != CMD_DATA_UNDERRUN) { 2455 hpsa_scsi_interpret_error(h, c); 2456 rc = -1; 2457 } else { 2458 if (buf->extended_response_flag != extended_response) { 2459 dev_err(&h->pdev->dev, 2460 "report luns requested format %u, got %u\n", 2461 extended_response, 2462 buf->extended_response_flag); 2463 rc = -1; 2464 } 2465 } 2466 out: 2467 cmd_special_free(h, c); 2468 return rc; 2469 } 2470 2471 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, 2472 struct ReportLUNdata *buf, 2473 int bufsize, int extended_response) 2474 { 2475 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response); 2476 } 2477 2478 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h, 2479 struct ReportLUNdata *buf, int bufsize) 2480 { 2481 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0); 2482 } 2483 2484 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device, 2485 int bus, int target, int lun) 2486 { 2487 device->bus = bus; 2488 device->target = target; 2489 device->lun = lun; 2490 } 2491 2492 /* Use VPD inquiry to get details of volume status */ 2493 static int hpsa_get_volume_status(struct ctlr_info *h, 2494 unsigned char scsi3addr[]) 2495 { 2496 int rc; 2497 int status; 2498 int size; 2499 unsigned char *buf; 2500 2501 buf = kzalloc(64, GFP_KERNEL); 2502 if (!buf) 2503 return HPSA_VPD_LV_STATUS_UNSUPPORTED; 2504 2505 /* Does controller have VPD for logical volume status? */ 2506 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) { 2507 dev_warn(&h->pdev->dev, "Logical volume status VPD page is unsupported.\n"); 2508 goto exit_failed; 2509 } 2510 2511 /* Get the size of the VPD return buffer */ 2512 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, 2513 buf, HPSA_VPD_HEADER_SZ); 2514 if (rc != 0) { 2515 dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n"); 2516 goto exit_failed; 2517 } 2518 size = buf[3]; 2519 2520 /* Now get the whole VPD buffer */ 2521 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, 2522 buf, size + HPSA_VPD_HEADER_SZ); 2523 if (rc != 0) { 2524 dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n"); 2525 goto exit_failed; 2526 } 2527 status = buf[4]; /* status byte */ 2528 2529 kfree(buf); 2530 return status; 2531 exit_failed: 2532 kfree(buf); 2533 return HPSA_VPD_LV_STATUS_UNSUPPORTED; 2534 } 2535 2536 /* Determine offline status of a volume. 2537 * Return either: 2538 * 0 (not offline) 2539 * -1 (offline for unknown reasons) 2540 * # (integer code indicating one of several NOT READY states 2541 * describing why a volume is to be kept offline) 2542 */ 2543 static unsigned char hpsa_volume_offline(struct ctlr_info *h, 2544 unsigned char scsi3addr[]) 2545 { 2546 struct CommandList *c; 2547 unsigned char *sense, sense_key, asc, ascq; 2548 int ldstat = 0; 2549 u16 cmd_status; 2550 u8 scsi_status; 2551 #define ASC_LUN_NOT_READY 0x04 2552 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04 2553 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02 2554 2555 c = cmd_alloc(h); 2556 if (!c) 2557 return 0; 2558 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); 2559 hpsa_scsi_do_simple_cmd_core(h, c); 2560 sense = c->err_info->SenseInfo; 2561 sense_key = sense[2]; 2562 asc = sense[12]; 2563 ascq = sense[13]; 2564 cmd_status = c->err_info->CommandStatus; 2565 scsi_status = c->err_info->ScsiStatus; 2566 cmd_free(h, c); 2567 /* Is the volume 'not ready'? */ 2568 if (cmd_status != CMD_TARGET_STATUS || 2569 scsi_status != SAM_STAT_CHECK_CONDITION || 2570 sense_key != NOT_READY || 2571 asc != ASC_LUN_NOT_READY) { 2572 return 0; 2573 } 2574 2575 /* Determine the reason for not ready state */ 2576 ldstat = hpsa_get_volume_status(h, scsi3addr); 2577 2578 /* Keep volume offline in certain cases: */ 2579 switch (ldstat) { 2580 case HPSA_LV_UNDERGOING_ERASE: 2581 case HPSA_LV_UNDERGOING_RPI: 2582 case HPSA_LV_PENDING_RPI: 2583 case HPSA_LV_ENCRYPTED_NO_KEY: 2584 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: 2585 case HPSA_LV_UNDERGOING_ENCRYPTION: 2586 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING: 2587 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: 2588 return ldstat; 2589 case HPSA_VPD_LV_STATUS_UNSUPPORTED: 2590 /* If VPD status page isn't available, 2591 * use ASC/ASCQ to determine state 2592 */ 2593 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) || 2594 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ)) 2595 return ldstat; 2596 break; 2597 default: 2598 break; 2599 } 2600 return 0; 2601 } 2602 2603 static int hpsa_update_device_info(struct ctlr_info *h, 2604 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, 2605 unsigned char *is_OBDR_device) 2606 { 2607 2608 #define OBDR_SIG_OFFSET 43 2609 #define OBDR_TAPE_SIG "$DR-10" 2610 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1) 2611 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN) 2612 2613 unsigned char *inq_buff; 2614 unsigned char *obdr_sig; 2615 2616 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); 2617 if (!inq_buff) 2618 goto bail_out; 2619 2620 /* Do an inquiry to the device to see what it is. */ 2621 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, 2622 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { 2623 /* Inquiry failed (msg printed already) */ 2624 dev_err(&h->pdev->dev, 2625 "hpsa_update_device_info: inquiry failed\n"); 2626 goto bail_out; 2627 } 2628 2629 this_device->devtype = (inq_buff[0] & 0x1f); 2630 memcpy(this_device->scsi3addr, scsi3addr, 8); 2631 memcpy(this_device->vendor, &inq_buff[8], 2632 sizeof(this_device->vendor)); 2633 memcpy(this_device->model, &inq_buff[16], 2634 sizeof(this_device->model)); 2635 memset(this_device->device_id, 0, 2636 sizeof(this_device->device_id)); 2637 hpsa_get_device_id(h, scsi3addr, this_device->device_id, 2638 sizeof(this_device->device_id)); 2639 2640 if (this_device->devtype == TYPE_DISK && 2641 is_logical_dev_addr_mode(scsi3addr)) { 2642 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); 2643 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) 2644 hpsa_get_ioaccel_status(h, scsi3addr, this_device); 2645 this_device->volume_offline = 2646 hpsa_volume_offline(h, scsi3addr); 2647 } else { 2648 this_device->raid_level = RAID_UNKNOWN; 2649 this_device->offload_config = 0; 2650 this_device->offload_enabled = 0; 2651 this_device->volume_offline = 0; 2652 } 2653 2654 if (is_OBDR_device) { 2655 /* See if this is a One-Button-Disaster-Recovery device 2656 * by looking for "$DR-10" at offset 43 in inquiry data. 2657 */ 2658 obdr_sig = &inq_buff[OBDR_SIG_OFFSET]; 2659 *is_OBDR_device = (this_device->devtype == TYPE_ROM && 2660 strncmp(obdr_sig, OBDR_TAPE_SIG, 2661 OBDR_SIG_LEN) == 0); 2662 } 2663 2664 kfree(inq_buff); 2665 return 0; 2666 2667 bail_out: 2668 kfree(inq_buff); 2669 return 1; 2670 } 2671 2672 static unsigned char *ext_target_model[] = { 2673 "MSA2012", 2674 "MSA2024", 2675 "MSA2312", 2676 "MSA2324", 2677 "P2000 G3 SAS", 2678 "MSA 2040 SAS", 2679 NULL, 2680 }; 2681 2682 static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) 2683 { 2684 int i; 2685 2686 for (i = 0; ext_target_model[i]; i++) 2687 if (strncmp(device->model, ext_target_model[i], 2688 strlen(ext_target_model[i])) == 0) 2689 return 1; 2690 return 0; 2691 } 2692 2693 /* Helper function to assign bus, target, lun mapping of devices. 2694 * Puts non-external target logical volumes on bus 0, external target logical 2695 * volumes on bus 1, physical devices on bus 2. and the hba on bus 3. 2696 * Logical drive target and lun are assigned at this time, but 2697 * physical device lun and target assignment are deferred (assigned 2698 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.) 2699 */ 2700 static void figure_bus_target_lun(struct ctlr_info *h, 2701 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device) 2702 { 2703 u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes)); 2704 2705 if (!is_logical_dev_addr_mode(lunaddrbytes)) { 2706 /* physical device, target and lun filled in later */ 2707 if (is_hba_lunid(lunaddrbytes)) 2708 hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff); 2709 else 2710 /* defer target, lun assignment for physical devices */ 2711 hpsa_set_bus_target_lun(device, 2, -1, -1); 2712 return; 2713 } 2714 /* It's a logical device */ 2715 if (is_ext_target(h, device)) { 2716 /* external target way, put logicals on bus 1 2717 * and match target/lun numbers box 2718 * reports, other smart array, bus 0, target 0, match lunid 2719 */ 2720 hpsa_set_bus_target_lun(device, 2721 1, (lunid >> 16) & 0x3fff, lunid & 0x00ff); 2722 return; 2723 } 2724 hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff); 2725 } 2726 2727 /* 2728 * If there is no lun 0 on a target, linux won't find any devices. 2729 * For the external targets (arrays), we have to manually detect the enclosure 2730 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report 2731 * it for some reason. *tmpdevice is the target we're adding, 2732 * this_device is a pointer into the current element of currentsd[] 2733 * that we're building up in update_scsi_devices(), below. 2734 * lunzerobits is a bitmap that tracks which targets already have a 2735 * lun 0 assigned. 2736 * Returns 1 if an enclosure was added, 0 if not. 2737 */ 2738 static int add_ext_target_dev(struct ctlr_info *h, 2739 struct hpsa_scsi_dev_t *tmpdevice, 2740 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes, 2741 unsigned long lunzerobits[], int *n_ext_target_devs) 2742 { 2743 unsigned char scsi3addr[8]; 2744 2745 if (test_bit(tmpdevice->target, lunzerobits)) 2746 return 0; /* There is already a lun 0 on this target. */ 2747 2748 if (!is_logical_dev_addr_mode(lunaddrbytes)) 2749 return 0; /* It's the logical targets that may lack lun 0. */ 2750 2751 if (!is_ext_target(h, tmpdevice)) 2752 return 0; /* Only external target devices have this problem. */ 2753 2754 if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */ 2755 return 0; 2756 2757 memset(scsi3addr, 0, 8); 2758 scsi3addr[3] = tmpdevice->target; 2759 if (is_hba_lunid(scsi3addr)) 2760 return 0; /* Don't add the RAID controller here. */ 2761 2762 if (is_scsi_rev_5(h)) 2763 return 0; /* p1210m doesn't need to do this. */ 2764 2765 if (*n_ext_target_devs >= MAX_EXT_TARGETS) { 2766 dev_warn(&h->pdev->dev, "Maximum number of external " 2767 "target devices exceeded. Check your hardware " 2768 "configuration."); 2769 return 0; 2770 } 2771 2772 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL)) 2773 return 0; 2774 (*n_ext_target_devs)++; 2775 hpsa_set_bus_target_lun(this_device, 2776 tmpdevice->bus, tmpdevice->target, 0); 2777 set_bit(tmpdevice->target, lunzerobits); 2778 return 1; 2779 } 2780 2781 /* 2782 * Get address of physical disk used for an ioaccel2 mode command: 2783 * 1. Extract ioaccel2 handle from the command. 2784 * 2. Find a matching ioaccel2 handle from list of physical disks. 2785 * 3. Return: 2786 * 1 and set scsi3addr to address of matching physical 2787 * 0 if no matching physical disk was found. 2788 */ 2789 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, 2790 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr) 2791 { 2792 struct ReportExtendedLUNdata *physicals = NULL; 2793 int responsesize = 24; /* size of physical extended response */ 2794 int extended = 2; /* flag forces reporting 'other dev info'. */ 2795 int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize; 2796 u32 nphysicals = 0; /* number of reported physical devs */ 2797 int found = 0; /* found match (1) or not (0) */ 2798 u32 find; /* handle we need to match */ 2799 int i; 2800 struct scsi_cmnd *scmd; /* scsi command within request being aborted */ 2801 struct hpsa_scsi_dev_t *d; /* device of request being aborted */ 2802 struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */ 2803 u32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */ 2804 u32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */ 2805 2806 if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2) 2807 return 0; /* no match */ 2808 2809 /* point to the ioaccel2 device handle */ 2810 c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex]; 2811 if (c2a == NULL) 2812 return 0; /* no match */ 2813 2814 scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd; 2815 if (scmd == NULL) 2816 return 0; /* no match */ 2817 2818 d = scmd->device->hostdata; 2819 if (d == NULL) 2820 return 0; /* no match */ 2821 2822 it_nexus = cpu_to_le32((u32) d->ioaccel_handle); 2823 scsi_nexus = cpu_to_le32((u32) c2a->scsi_nexus); 2824 find = c2a->scsi_nexus; 2825 2826 if (h->raid_offload_debug > 0) 2827 dev_info(&h->pdev->dev, 2828 "%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n", 2829 __func__, scsi_nexus, 2830 d->device_id[0], d->device_id[1], d->device_id[2], 2831 d->device_id[3], d->device_id[4], d->device_id[5], 2832 d->device_id[6], d->device_id[7], d->device_id[8], 2833 d->device_id[9], d->device_id[10], d->device_id[11], 2834 d->device_id[12], d->device_id[13], d->device_id[14], 2835 d->device_id[15]); 2836 2837 /* Get the list of physical devices */ 2838 physicals = kzalloc(reportsize, GFP_KERNEL); 2839 if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals, 2840 reportsize, extended)) { 2841 dev_err(&h->pdev->dev, 2842 "Can't lookup %s device handle: report physical LUNs failed.\n", 2843 "HP SSD Smart Path"); 2844 kfree(physicals); 2845 return 0; 2846 } 2847 nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) / 2848 responsesize; 2849 2850 2851 /* find ioaccel2 handle in list of physicals: */ 2852 for (i = 0; i < nphysicals; i++) { 2853 /* handle is in bytes 28-31 of each lun */ 2854 if (memcmp(&((struct ReportExtendedLUNdata *) 2855 physicals)->LUN[i][20], &find, 4) != 0) { 2856 continue; /* didn't match */ 2857 } 2858 found = 1; 2859 memcpy(scsi3addr, &((struct ReportExtendedLUNdata *) 2860 physicals)->LUN[i][0], 8); 2861 if (h->raid_offload_debug > 0) 2862 dev_info(&h->pdev->dev, 2863 "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 2864 __func__, find, 2865 ((struct ReportExtendedLUNdata *) 2866 physicals)->LUN[i][20], 2867 scsi3addr[0], scsi3addr[1], scsi3addr[2], 2868 scsi3addr[3], scsi3addr[4], scsi3addr[5], 2869 scsi3addr[6], scsi3addr[7]); 2870 break; /* found it */ 2871 } 2872 2873 kfree(physicals); 2874 if (found) 2875 return 1; 2876 else 2877 return 0; 2878 2879 } 2880 /* 2881 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev, 2882 * logdev. The number of luns in physdev and logdev are returned in 2883 * *nphysicals and *nlogicals, respectively. 2884 * Returns 0 on success, -1 otherwise. 2885 */ 2886 static int hpsa_gather_lun_info(struct ctlr_info *h, 2887 int reportlunsize, 2888 struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode, 2889 struct ReportLUNdata *logdev, u32 *nlogicals) 2890 { 2891 int physical_entry_size = 8; 2892 2893 *physical_mode = 0; 2894 2895 /* For I/O accelerator mode we need to read physical device handles */ 2896 if (h->transMethod & CFGTBL_Trans_io_accel1 || 2897 h->transMethod & CFGTBL_Trans_io_accel2) { 2898 *physical_mode = HPSA_REPORT_PHYS_EXTENDED; 2899 physical_entry_size = 24; 2900 } 2901 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 2902 *physical_mode)) { 2903 dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); 2904 return -1; 2905 } 2906 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 2907 physical_entry_size; 2908 if (*nphysicals > HPSA_MAX_PHYS_LUN) { 2909 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded." 2910 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, 2911 *nphysicals - HPSA_MAX_PHYS_LUN); 2912 *nphysicals = HPSA_MAX_PHYS_LUN; 2913 } 2914 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) { 2915 dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); 2916 return -1; 2917 } 2918 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8; 2919 /* Reject Logicals in excess of our max capability. */ 2920 if (*nlogicals > HPSA_MAX_LUN) { 2921 dev_warn(&h->pdev->dev, 2922 "maximum logical LUNs (%d) exceeded. " 2923 "%d LUNs ignored.\n", HPSA_MAX_LUN, 2924 *nlogicals - HPSA_MAX_LUN); 2925 *nlogicals = HPSA_MAX_LUN; 2926 } 2927 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) { 2928 dev_warn(&h->pdev->dev, 2929 "maximum logical + physical LUNs (%d) exceeded. " 2930 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, 2931 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN); 2932 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals; 2933 } 2934 return 0; 2935 } 2936 2937 u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, 2938 int nphysicals, int nlogicals, 2939 struct ReportExtendedLUNdata *physdev_list, 2940 struct ReportLUNdata *logdev_list) 2941 { 2942 /* Helper function, figure out where the LUN ID info is coming from 2943 * given index i, lists of physical and logical devices, where in 2944 * the list the raid controller is supposed to appear (first or last) 2945 */ 2946 2947 int logicals_start = nphysicals + (raid_ctlr_position == 0); 2948 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0); 2949 2950 if (i == raid_ctlr_position) 2951 return RAID_CTLR_LUNID; 2952 2953 if (i < logicals_start) 2954 return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0]; 2955 2956 if (i < last_device) 2957 return &logdev_list->LUN[i - nphysicals - 2958 (raid_ctlr_position == 0)][0]; 2959 BUG(); 2960 return NULL; 2961 } 2962 2963 static int hpsa_hba_mode_enabled(struct ctlr_info *h) 2964 { 2965 int rc; 2966 struct bmic_controller_parameters *ctlr_params; 2967 ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters), 2968 GFP_KERNEL); 2969 2970 if (!ctlr_params) 2971 return 0; 2972 rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params, 2973 sizeof(struct bmic_controller_parameters)); 2974 if (rc != 0) { 2975 kfree(ctlr_params); 2976 return 0; 2977 } 2978 return ctlr_params->nvram_flags & (1 << 3) ? 1 : 0; 2979 } 2980 2981 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) 2982 { 2983 /* the idea here is we could get notified 2984 * that some devices have changed, so we do a report 2985 * physical luns and report logical luns cmd, and adjust 2986 * our list of devices accordingly. 2987 * 2988 * The scsi3addr's of devices won't change so long as the 2989 * adapter is not reset. That means we can rescan and 2990 * tell which devices we already know about, vs. new 2991 * devices, vs. disappearing devices. 2992 */ 2993 struct ReportExtendedLUNdata *physdev_list = NULL; 2994 struct ReportLUNdata *logdev_list = NULL; 2995 u32 nphysicals = 0; 2996 u32 nlogicals = 0; 2997 int physical_mode = 0; 2998 u32 ndev_allocated = 0; 2999 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; 3000 int ncurrent = 0; 3001 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24; 3002 int i, n_ext_target_devs, ndevs_to_allocate; 3003 int raid_ctlr_position; 3004 u8 rescan_hba_mode; 3005 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS); 3006 3007 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); 3008 physdev_list = kzalloc(reportlunsize, GFP_KERNEL); 3009 logdev_list = kzalloc(reportlunsize, GFP_KERNEL); 3010 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); 3011 3012 if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) { 3013 dev_err(&h->pdev->dev, "out of memory\n"); 3014 goto out; 3015 } 3016 memset(lunzerobits, 0, sizeof(lunzerobits)); 3017 3018 rescan_hba_mode = hpsa_hba_mode_enabled(h); 3019 3020 if (!h->hba_mode_enabled && rescan_hba_mode) 3021 dev_warn(&h->pdev->dev, "HBA mode enabled\n"); 3022 else if (h->hba_mode_enabled && !rescan_hba_mode) 3023 dev_warn(&h->pdev->dev, "HBA mode disabled\n"); 3024 3025 h->hba_mode_enabled = rescan_hba_mode; 3026 3027 if (hpsa_gather_lun_info(h, reportlunsize, 3028 (struct ReportLUNdata *) physdev_list, &nphysicals, 3029 &physical_mode, logdev_list, &nlogicals)) 3030 goto out; 3031 3032 /* We might see up to the maximum number of logical and physical disks 3033 * plus external target devices, and a device for the local RAID 3034 * controller. 3035 */ 3036 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1; 3037 3038 /* Allocate the per device structures */ 3039 for (i = 0; i < ndevs_to_allocate; i++) { 3040 if (i >= HPSA_MAX_DEVICES) { 3041 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded." 3042 " %d devices ignored.\n", HPSA_MAX_DEVICES, 3043 ndevs_to_allocate - HPSA_MAX_DEVICES); 3044 break; 3045 } 3046 3047 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL); 3048 if (!currentsd[i]) { 3049 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n", 3050 __FILE__, __LINE__); 3051 goto out; 3052 } 3053 ndev_allocated++; 3054 } 3055 3056 if (unlikely(is_scsi_rev_5(h))) 3057 raid_ctlr_position = 0; 3058 else 3059 raid_ctlr_position = nphysicals + nlogicals; 3060 3061 /* adjust our table of devices */ 3062 n_ext_target_devs = 0; 3063 for (i = 0; i < nphysicals + nlogicals + 1; i++) { 3064 u8 *lunaddrbytes, is_OBDR = 0; 3065 3066 /* Figure out where the LUN ID info is coming from */ 3067 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, 3068 i, nphysicals, nlogicals, physdev_list, logdev_list); 3069 /* skip masked physical devices. */ 3070 if (lunaddrbytes[3] & 0xC0 && 3071 i < nphysicals + (raid_ctlr_position == 0)) 3072 continue; 3073 3074 /* Get device type, vendor, model, device id */ 3075 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice, 3076 &is_OBDR)) 3077 continue; /* skip it if we can't talk to it. */ 3078 figure_bus_target_lun(h, lunaddrbytes, tmpdevice); 3079 this_device = currentsd[ncurrent]; 3080 3081 /* 3082 * For external target devices, we have to insert a LUN 0 which 3083 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there 3084 * is nonetheless an enclosure device there. We have to 3085 * present that otherwise linux won't find anything if 3086 * there is no lun 0. 3087 */ 3088 if (add_ext_target_dev(h, tmpdevice, this_device, 3089 lunaddrbytes, lunzerobits, 3090 &n_ext_target_devs)) { 3091 ncurrent++; 3092 this_device = currentsd[ncurrent]; 3093 } 3094 3095 *this_device = *tmpdevice; 3096 3097 switch (this_device->devtype) { 3098 case TYPE_ROM: 3099 /* We don't *really* support actual CD-ROM devices, 3100 * just "One Button Disaster Recovery" tape drive 3101 * which temporarily pretends to be a CD-ROM drive. 3102 * So we check that the device is really an OBDR tape 3103 * device by checking for "$DR-10" in bytes 43-48 of 3104 * the inquiry data. 3105 */ 3106 if (is_OBDR) 3107 ncurrent++; 3108 break; 3109 case TYPE_DISK: 3110 if (h->hba_mode_enabled) { 3111 /* never use raid mapper in HBA mode */ 3112 this_device->offload_enabled = 0; 3113 ncurrent++; 3114 break; 3115 } else if (h->acciopath_status) { 3116 if (i >= nphysicals) { 3117 ncurrent++; 3118 break; 3119 } 3120 } else { 3121 if (i < nphysicals) 3122 break; 3123 ncurrent++; 3124 break; 3125 } 3126 if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) { 3127 memcpy(&this_device->ioaccel_handle, 3128 &lunaddrbytes[20], 3129 sizeof(this_device->ioaccel_handle)); 3130 ncurrent++; 3131 } 3132 break; 3133 case TYPE_TAPE: 3134 case TYPE_MEDIUM_CHANGER: 3135 ncurrent++; 3136 break; 3137 case TYPE_RAID: 3138 /* Only present the Smartarray HBA as a RAID controller. 3139 * If it's a RAID controller other than the HBA itself 3140 * (an external RAID controller, MSA500 or similar) 3141 * don't present it. 3142 */ 3143 if (!is_hba_lunid(lunaddrbytes)) 3144 break; 3145 ncurrent++; 3146 break; 3147 default: 3148 break; 3149 } 3150 if (ncurrent >= HPSA_MAX_DEVICES) 3151 break; 3152 } 3153 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); 3154 out: 3155 kfree(tmpdevice); 3156 for (i = 0; i < ndev_allocated; i++) 3157 kfree(currentsd[i]); 3158 kfree(currentsd); 3159 kfree(physdev_list); 3160 kfree(logdev_list); 3161 } 3162 3163 /* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci 3164 * dma mapping and fills in the scatter gather entries of the 3165 * hpsa command, cp. 3166 */ 3167 static int hpsa_scatter_gather(struct ctlr_info *h, 3168 struct CommandList *cp, 3169 struct scsi_cmnd *cmd) 3170 { 3171 unsigned int len; 3172 struct scatterlist *sg; 3173 u64 addr64; 3174 int use_sg, i, sg_index, chained; 3175 struct SGDescriptor *curr_sg; 3176 3177 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); 3178 3179 use_sg = scsi_dma_map(cmd); 3180 if (use_sg < 0) 3181 return use_sg; 3182 3183 if (!use_sg) 3184 goto sglist_finished; 3185 3186 curr_sg = cp->SG; 3187 chained = 0; 3188 sg_index = 0; 3189 scsi_for_each_sg(cmd, sg, use_sg, i) { 3190 if (i == h->max_cmd_sg_entries - 1 && 3191 use_sg > h->max_cmd_sg_entries) { 3192 chained = 1; 3193 curr_sg = h->cmd_sg_list[cp->cmdindex]; 3194 sg_index = 0; 3195 } 3196 addr64 = (u64) sg_dma_address(sg); 3197 len = sg_dma_len(sg); 3198 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); 3199 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); 3200 curr_sg->Len = len; 3201 curr_sg->Ext = (i < scsi_sg_count(cmd) - 1) ? 0 : HPSA_SG_LAST; 3202 curr_sg++; 3203 } 3204 3205 if (use_sg + chained > h->maxSG) 3206 h->maxSG = use_sg + chained; 3207 3208 if (chained) { 3209 cp->Header.SGList = h->max_cmd_sg_entries; 3210 cp->Header.SGTotal = (u16) (use_sg + 1); 3211 if (hpsa_map_sg_chain_block(h, cp)) { 3212 scsi_dma_unmap(cmd); 3213 return -1; 3214 } 3215 return 0; 3216 } 3217 3218 sglist_finished: 3219 3220 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */ 3221 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */ 3222 return 0; 3223 } 3224 3225 #define IO_ACCEL_INELIGIBLE (1) 3226 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len) 3227 { 3228 int is_write = 0; 3229 u32 block; 3230 u32 block_cnt; 3231 3232 /* Perform some CDB fixups if needed using 10 byte reads/writes only */ 3233 switch (cdb[0]) { 3234 case WRITE_6: 3235 case WRITE_12: 3236 is_write = 1; 3237 case READ_6: 3238 case READ_12: 3239 if (*cdb_len == 6) { 3240 block = (((u32) cdb[2]) << 8) | cdb[3]; 3241 block_cnt = cdb[4]; 3242 } else { 3243 BUG_ON(*cdb_len != 12); 3244 block = (((u32) cdb[2]) << 24) | 3245 (((u32) cdb[3]) << 16) | 3246 (((u32) cdb[4]) << 8) | 3247 cdb[5]; 3248 block_cnt = 3249 (((u32) cdb[6]) << 24) | 3250 (((u32) cdb[7]) << 16) | 3251 (((u32) cdb[8]) << 8) | 3252 cdb[9]; 3253 } 3254 if (block_cnt > 0xffff) 3255 return IO_ACCEL_INELIGIBLE; 3256 3257 cdb[0] = is_write ? WRITE_10 : READ_10; 3258 cdb[1] = 0; 3259 cdb[2] = (u8) (block >> 24); 3260 cdb[3] = (u8) (block >> 16); 3261 cdb[4] = (u8) (block >> 8); 3262 cdb[5] = (u8) (block); 3263 cdb[6] = 0; 3264 cdb[7] = (u8) (block_cnt >> 8); 3265 cdb[8] = (u8) (block_cnt); 3266 cdb[9] = 0; 3267 *cdb_len = 10; 3268 break; 3269 } 3270 return 0; 3271 } 3272 3273 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, 3274 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 3275 u8 *scsi3addr) 3276 { 3277 struct scsi_cmnd *cmd = c->scsi_cmd; 3278 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; 3279 unsigned int len; 3280 unsigned int total_len = 0; 3281 struct scatterlist *sg; 3282 u64 addr64; 3283 int use_sg, i; 3284 struct SGDescriptor *curr_sg; 3285 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE; 3286 3287 /* TODO: implement chaining support */ 3288 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) 3289 return IO_ACCEL_INELIGIBLE; 3290 3291 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX); 3292 3293 if (fixup_ioaccel_cdb(cdb, &cdb_len)) 3294 return IO_ACCEL_INELIGIBLE; 3295 3296 c->cmd_type = CMD_IOACCEL1; 3297 3298 /* Adjust the DMA address to point to the accelerated command buffer */ 3299 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle + 3300 (c->cmdindex * sizeof(*cp)); 3301 BUG_ON(c->busaddr & 0x0000007F); 3302 3303 use_sg = scsi_dma_map(cmd); 3304 if (use_sg < 0) 3305 return use_sg; 3306 3307 if (use_sg) { 3308 curr_sg = cp->SG; 3309 scsi_for_each_sg(cmd, sg, use_sg, i) { 3310 addr64 = (u64) sg_dma_address(sg); 3311 len = sg_dma_len(sg); 3312 total_len += len; 3313 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); 3314 curr_sg->Addr.upper = 3315 (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); 3316 curr_sg->Len = len; 3317 3318 if (i == (scsi_sg_count(cmd) - 1)) 3319 curr_sg->Ext = HPSA_SG_LAST; 3320 else 3321 curr_sg->Ext = 0; /* we are not chaining */ 3322 curr_sg++; 3323 } 3324 3325 switch (cmd->sc_data_direction) { 3326 case DMA_TO_DEVICE: 3327 control |= IOACCEL1_CONTROL_DATA_OUT; 3328 break; 3329 case DMA_FROM_DEVICE: 3330 control |= IOACCEL1_CONTROL_DATA_IN; 3331 break; 3332 case DMA_NONE: 3333 control |= IOACCEL1_CONTROL_NODATAXFER; 3334 break; 3335 default: 3336 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 3337 cmd->sc_data_direction); 3338 BUG(); 3339 break; 3340 } 3341 } else { 3342 control |= IOACCEL1_CONTROL_NODATAXFER; 3343 } 3344 3345 c->Header.SGList = use_sg; 3346 /* Fill out the command structure to submit */ 3347 cp->dev_handle = ioaccel_handle & 0xFFFF; 3348 cp->transfer_len = total_len; 3349 cp->io_flags = IOACCEL1_IOFLAGS_IO_REQ | 3350 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK); 3351 cp->control = control; 3352 memcpy(cp->CDB, cdb, cdb_len); 3353 memcpy(cp->CISS_LUN, scsi3addr, 8); 3354 /* Tag was already set at init time. */ 3355 enqueue_cmd_and_start_io(h, c); 3356 return 0; 3357 } 3358 3359 /* 3360 * Queue a command directly to a device behind the controller using the 3361 * I/O accelerator path. 3362 */ 3363 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h, 3364 struct CommandList *c) 3365 { 3366 struct scsi_cmnd *cmd = c->scsi_cmd; 3367 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 3368 3369 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, 3370 cmd->cmnd, cmd->cmd_len, dev->scsi3addr); 3371 } 3372 3373 /* 3374 * Set encryption parameters for the ioaccel2 request 3375 */ 3376 static void set_encrypt_ioaccel2(struct ctlr_info *h, 3377 struct CommandList *c, struct io_accel2_cmd *cp) 3378 { 3379 struct scsi_cmnd *cmd = c->scsi_cmd; 3380 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 3381 struct raid_map_data *map = &dev->raid_map; 3382 u64 first_block; 3383 3384 BUG_ON(!(dev->offload_config && dev->offload_enabled)); 3385 3386 /* Are we doing encryption on this device */ 3387 if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON)) 3388 return; 3389 /* Set the data encryption key index. */ 3390 cp->dekindex = map->dekindex; 3391 3392 /* Set the encryption enable flag, encoded into direction field. */ 3393 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK; 3394 3395 /* Set encryption tweak values based on logical block address 3396 * If block size is 512, tweak value is LBA. 3397 * For other block sizes, tweak is (LBA * block size)/ 512) 3398 */ 3399 switch (cmd->cmnd[0]) { 3400 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */ 3401 case WRITE_6: 3402 case READ_6: 3403 if (map->volume_blk_size == 512) { 3404 cp->tweak_lower = 3405 (((u32) cmd->cmnd[2]) << 8) | 3406 cmd->cmnd[3]; 3407 cp->tweak_upper = 0; 3408 } else { 3409 first_block = 3410 (((u64) cmd->cmnd[2]) << 8) | 3411 cmd->cmnd[3]; 3412 first_block = (first_block * map->volume_blk_size)/512; 3413 cp->tweak_lower = (u32)first_block; 3414 cp->tweak_upper = (u32)(first_block >> 32); 3415 } 3416 break; 3417 case WRITE_10: 3418 case READ_10: 3419 if (map->volume_blk_size == 512) { 3420 cp->tweak_lower = 3421 (((u32) cmd->cmnd[2]) << 24) | 3422 (((u32) cmd->cmnd[3]) << 16) | 3423 (((u32) cmd->cmnd[4]) << 8) | 3424 cmd->cmnd[5]; 3425 cp->tweak_upper = 0; 3426 } else { 3427 first_block = 3428 (((u64) cmd->cmnd[2]) << 24) | 3429 (((u64) cmd->cmnd[3]) << 16) | 3430 (((u64) cmd->cmnd[4]) << 8) | 3431 cmd->cmnd[5]; 3432 first_block = (first_block * map->volume_blk_size)/512; 3433 cp->tweak_lower = (u32)first_block; 3434 cp->tweak_upper = (u32)(first_block >> 32); 3435 } 3436 break; 3437 /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */ 3438 case WRITE_12: 3439 case READ_12: 3440 if (map->volume_blk_size == 512) { 3441 cp->tweak_lower = 3442 (((u32) cmd->cmnd[2]) << 24) | 3443 (((u32) cmd->cmnd[3]) << 16) | 3444 (((u32) cmd->cmnd[4]) << 8) | 3445 cmd->cmnd[5]; 3446 cp->tweak_upper = 0; 3447 } else { 3448 first_block = 3449 (((u64) cmd->cmnd[2]) << 24) | 3450 (((u64) cmd->cmnd[3]) << 16) | 3451 (((u64) cmd->cmnd[4]) << 8) | 3452 cmd->cmnd[5]; 3453 first_block = (first_block * map->volume_blk_size)/512; 3454 cp->tweak_lower = (u32)first_block; 3455 cp->tweak_upper = (u32)(first_block >> 32); 3456 } 3457 break; 3458 case WRITE_16: 3459 case READ_16: 3460 if (map->volume_blk_size == 512) { 3461 cp->tweak_lower = 3462 (((u32) cmd->cmnd[6]) << 24) | 3463 (((u32) cmd->cmnd[7]) << 16) | 3464 (((u32) cmd->cmnd[8]) << 8) | 3465 cmd->cmnd[9]; 3466 cp->tweak_upper = 3467 (((u32) cmd->cmnd[2]) << 24) | 3468 (((u32) cmd->cmnd[3]) << 16) | 3469 (((u32) cmd->cmnd[4]) << 8) | 3470 cmd->cmnd[5]; 3471 } else { 3472 first_block = 3473 (((u64) cmd->cmnd[2]) << 56) | 3474 (((u64) cmd->cmnd[3]) << 48) | 3475 (((u64) cmd->cmnd[4]) << 40) | 3476 (((u64) cmd->cmnd[5]) << 32) | 3477 (((u64) cmd->cmnd[6]) << 24) | 3478 (((u64) cmd->cmnd[7]) << 16) | 3479 (((u64) cmd->cmnd[8]) << 8) | 3480 cmd->cmnd[9]; 3481 first_block = (first_block * map->volume_blk_size)/512; 3482 cp->tweak_lower = (u32)first_block; 3483 cp->tweak_upper = (u32)(first_block >> 32); 3484 } 3485 break; 3486 default: 3487 dev_err(&h->pdev->dev, 3488 "ERROR: %s: IOACCEL request CDB size not supported for encryption\n", 3489 __func__); 3490 BUG(); 3491 break; 3492 } 3493 } 3494 3495 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, 3496 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 3497 u8 *scsi3addr) 3498 { 3499 struct scsi_cmnd *cmd = c->scsi_cmd; 3500 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; 3501 struct ioaccel2_sg_element *curr_sg; 3502 int use_sg, i; 3503 struct scatterlist *sg; 3504 u64 addr64; 3505 u32 len; 3506 u32 total_len = 0; 3507 3508 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) 3509 return IO_ACCEL_INELIGIBLE; 3510 3511 if (fixup_ioaccel_cdb(cdb, &cdb_len)) 3512 return IO_ACCEL_INELIGIBLE; 3513 c->cmd_type = CMD_IOACCEL2; 3514 /* Adjust the DMA address to point to the accelerated command buffer */ 3515 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + 3516 (c->cmdindex * sizeof(*cp)); 3517 BUG_ON(c->busaddr & 0x0000007F); 3518 3519 memset(cp, 0, sizeof(*cp)); 3520 cp->IU_type = IOACCEL2_IU_TYPE; 3521 3522 use_sg = scsi_dma_map(cmd); 3523 if (use_sg < 0) 3524 return use_sg; 3525 3526 if (use_sg) { 3527 BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES); 3528 curr_sg = cp->sg; 3529 scsi_for_each_sg(cmd, sg, use_sg, i) { 3530 addr64 = (u64) sg_dma_address(sg); 3531 len = sg_dma_len(sg); 3532 total_len += len; 3533 curr_sg->address = cpu_to_le64(addr64); 3534 curr_sg->length = cpu_to_le32(len); 3535 curr_sg->reserved[0] = 0; 3536 curr_sg->reserved[1] = 0; 3537 curr_sg->reserved[2] = 0; 3538 curr_sg->chain_indicator = 0; 3539 curr_sg++; 3540 } 3541 3542 switch (cmd->sc_data_direction) { 3543 case DMA_TO_DEVICE: 3544 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 3545 cp->direction |= IOACCEL2_DIR_DATA_OUT; 3546 break; 3547 case DMA_FROM_DEVICE: 3548 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 3549 cp->direction |= IOACCEL2_DIR_DATA_IN; 3550 break; 3551 case DMA_NONE: 3552 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 3553 cp->direction |= IOACCEL2_DIR_NO_DATA; 3554 break; 3555 default: 3556 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 3557 cmd->sc_data_direction); 3558 BUG(); 3559 break; 3560 } 3561 } else { 3562 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 3563 cp->direction |= IOACCEL2_DIR_NO_DATA; 3564 } 3565 3566 /* Set encryption parameters, if necessary */ 3567 set_encrypt_ioaccel2(h, c, cp); 3568 3569 cp->scsi_nexus = ioaccel_handle; 3570 cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) | 3571 DIRECT_LOOKUP_BIT; 3572 memcpy(cp->cdb, cdb, sizeof(cp->cdb)); 3573 3574 /* fill in sg elements */ 3575 cp->sg_count = (u8) use_sg; 3576 3577 cp->data_len = cpu_to_le32(total_len); 3578 cp->err_ptr = cpu_to_le64(c->busaddr + 3579 offsetof(struct io_accel2_cmd, error_data)); 3580 cp->err_len = cpu_to_le32((u32) sizeof(cp->error_data)); 3581 3582 enqueue_cmd_and_start_io(h, c); 3583 return 0; 3584 } 3585 3586 /* 3587 * Queue a command to the correct I/O accelerator path. 3588 */ 3589 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, 3590 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 3591 u8 *scsi3addr) 3592 { 3593 if (h->transMethod & CFGTBL_Trans_io_accel1) 3594 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle, 3595 cdb, cdb_len, scsi3addr); 3596 else 3597 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle, 3598 cdb, cdb_len, scsi3addr); 3599 } 3600 3601 static void raid_map_helper(struct raid_map_data *map, 3602 int offload_to_mirror, u32 *map_index, u32 *current_group) 3603 { 3604 if (offload_to_mirror == 0) { 3605 /* use physical disk in the first mirrored group. */ 3606 *map_index %= map->data_disks_per_row; 3607 return; 3608 } 3609 do { 3610 /* determine mirror group that *map_index indicates */ 3611 *current_group = *map_index / map->data_disks_per_row; 3612 if (offload_to_mirror == *current_group) 3613 continue; 3614 if (*current_group < (map->layout_map_count - 1)) { 3615 /* select map index from next group */ 3616 *map_index += map->data_disks_per_row; 3617 (*current_group)++; 3618 } else { 3619 /* select map index from first group */ 3620 *map_index %= map->data_disks_per_row; 3621 *current_group = 0; 3622 } 3623 } while (offload_to_mirror != *current_group); 3624 } 3625 3626 /* 3627 * Attempt to perform offload RAID mapping for a logical volume I/O. 3628 */ 3629 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, 3630 struct CommandList *c) 3631 { 3632 struct scsi_cmnd *cmd = c->scsi_cmd; 3633 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 3634 struct raid_map_data *map = &dev->raid_map; 3635 struct raid_map_disk_data *dd = &map->data[0]; 3636 int is_write = 0; 3637 u32 map_index; 3638 u64 first_block, last_block; 3639 u32 block_cnt; 3640 u32 blocks_per_row; 3641 u64 first_row, last_row; 3642 u32 first_row_offset, last_row_offset; 3643 u32 first_column, last_column; 3644 u64 r0_first_row, r0_last_row; 3645 u32 r5or6_blocks_per_row; 3646 u64 r5or6_first_row, r5or6_last_row; 3647 u32 r5or6_first_row_offset, r5or6_last_row_offset; 3648 u32 r5or6_first_column, r5or6_last_column; 3649 u32 total_disks_per_row; 3650 u32 stripesize; 3651 u32 first_group, last_group, current_group; 3652 u32 map_row; 3653 u32 disk_handle; 3654 u64 disk_block; 3655 u32 disk_block_cnt; 3656 u8 cdb[16]; 3657 u8 cdb_len; 3658 #if BITS_PER_LONG == 32 3659 u64 tmpdiv; 3660 #endif 3661 int offload_to_mirror; 3662 3663 BUG_ON(!(dev->offload_config && dev->offload_enabled)); 3664 3665 /* check for valid opcode, get LBA and block count */ 3666 switch (cmd->cmnd[0]) { 3667 case WRITE_6: 3668 is_write = 1; 3669 case READ_6: 3670 first_block = 3671 (((u64) cmd->cmnd[2]) << 8) | 3672 cmd->cmnd[3]; 3673 block_cnt = cmd->cmnd[4]; 3674 break; 3675 case WRITE_10: 3676 is_write = 1; 3677 case READ_10: 3678 first_block = 3679 (((u64) cmd->cmnd[2]) << 24) | 3680 (((u64) cmd->cmnd[3]) << 16) | 3681 (((u64) cmd->cmnd[4]) << 8) | 3682 cmd->cmnd[5]; 3683 block_cnt = 3684 (((u32) cmd->cmnd[7]) << 8) | 3685 cmd->cmnd[8]; 3686 break; 3687 case WRITE_12: 3688 is_write = 1; 3689 case READ_12: 3690 first_block = 3691 (((u64) cmd->cmnd[2]) << 24) | 3692 (((u64) cmd->cmnd[3]) << 16) | 3693 (((u64) cmd->cmnd[4]) << 8) | 3694 cmd->cmnd[5]; 3695 block_cnt = 3696 (((u32) cmd->cmnd[6]) << 24) | 3697 (((u32) cmd->cmnd[7]) << 16) | 3698 (((u32) cmd->cmnd[8]) << 8) | 3699 cmd->cmnd[9]; 3700 break; 3701 case WRITE_16: 3702 is_write = 1; 3703 case READ_16: 3704 first_block = 3705 (((u64) cmd->cmnd[2]) << 56) | 3706 (((u64) cmd->cmnd[3]) << 48) | 3707 (((u64) cmd->cmnd[4]) << 40) | 3708 (((u64) cmd->cmnd[5]) << 32) | 3709 (((u64) cmd->cmnd[6]) << 24) | 3710 (((u64) cmd->cmnd[7]) << 16) | 3711 (((u64) cmd->cmnd[8]) << 8) | 3712 cmd->cmnd[9]; 3713 block_cnt = 3714 (((u32) cmd->cmnd[10]) << 24) | 3715 (((u32) cmd->cmnd[11]) << 16) | 3716 (((u32) cmd->cmnd[12]) << 8) | 3717 cmd->cmnd[13]; 3718 break; 3719 default: 3720 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */ 3721 } 3722 BUG_ON(block_cnt == 0); 3723 last_block = first_block + block_cnt - 1; 3724 3725 /* check for write to non-RAID-0 */ 3726 if (is_write && dev->raid_level != 0) 3727 return IO_ACCEL_INELIGIBLE; 3728 3729 /* check for invalid block or wraparound */ 3730 if (last_block >= map->volume_blk_cnt || last_block < first_block) 3731 return IO_ACCEL_INELIGIBLE; 3732 3733 /* calculate stripe information for the request */ 3734 blocks_per_row = map->data_disks_per_row * map->strip_size; 3735 #if BITS_PER_LONG == 32 3736 tmpdiv = first_block; 3737 (void) do_div(tmpdiv, blocks_per_row); 3738 first_row = tmpdiv; 3739 tmpdiv = last_block; 3740 (void) do_div(tmpdiv, blocks_per_row); 3741 last_row = tmpdiv; 3742 first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); 3743 last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); 3744 tmpdiv = first_row_offset; 3745 (void) do_div(tmpdiv, map->strip_size); 3746 first_column = tmpdiv; 3747 tmpdiv = last_row_offset; 3748 (void) do_div(tmpdiv, map->strip_size); 3749 last_column = tmpdiv; 3750 #else 3751 first_row = first_block / blocks_per_row; 3752 last_row = last_block / blocks_per_row; 3753 first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); 3754 last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); 3755 first_column = first_row_offset / map->strip_size; 3756 last_column = last_row_offset / map->strip_size; 3757 #endif 3758 3759 /* if this isn't a single row/column then give to the controller */ 3760 if ((first_row != last_row) || (first_column != last_column)) 3761 return IO_ACCEL_INELIGIBLE; 3762 3763 /* proceeding with driver mapping */ 3764 total_disks_per_row = map->data_disks_per_row + 3765 map->metadata_disks_per_row; 3766 map_row = ((u32)(first_row >> map->parity_rotation_shift)) % 3767 map->row_cnt; 3768 map_index = (map_row * total_disks_per_row) + first_column; 3769 3770 switch (dev->raid_level) { 3771 case HPSA_RAID_0: 3772 break; /* nothing special to do */ 3773 case HPSA_RAID_1: 3774 /* Handles load balance across RAID 1 members. 3775 * (2-drive R1 and R10 with even # of drives.) 3776 * Appropriate for SSDs, not optimal for HDDs 3777 */ 3778 BUG_ON(map->layout_map_count != 2); 3779 if (dev->offload_to_mirror) 3780 map_index += map->data_disks_per_row; 3781 dev->offload_to_mirror = !dev->offload_to_mirror; 3782 break; 3783 case HPSA_RAID_ADM: 3784 /* Handles N-way mirrors (R1-ADM) 3785 * and R10 with # of drives divisible by 3.) 3786 */ 3787 BUG_ON(map->layout_map_count != 3); 3788 3789 offload_to_mirror = dev->offload_to_mirror; 3790 raid_map_helper(map, offload_to_mirror, 3791 &map_index, ¤t_group); 3792 /* set mirror group to use next time */ 3793 offload_to_mirror = 3794 (offload_to_mirror >= map->layout_map_count - 1) 3795 ? 0 : offload_to_mirror + 1; 3796 /* FIXME: remove after debug/dev */ 3797 BUG_ON(offload_to_mirror >= map->layout_map_count); 3798 dev_warn(&h->pdev->dev, 3799 "DEBUG: Using physical disk map index %d from mirror group %d\n", 3800 map_index, offload_to_mirror); 3801 dev->offload_to_mirror = offload_to_mirror; 3802 /* Avoid direct use of dev->offload_to_mirror within this 3803 * function since multiple threads might simultaneously 3804 * increment it beyond the range of dev->layout_map_count -1. 3805 */ 3806 break; 3807 case HPSA_RAID_5: 3808 case HPSA_RAID_6: 3809 if (map->layout_map_count <= 1) 3810 break; 3811 3812 /* Verify first and last block are in same RAID group */ 3813 r5or6_blocks_per_row = 3814 map->strip_size * map->data_disks_per_row; 3815 BUG_ON(r5or6_blocks_per_row == 0); 3816 stripesize = r5or6_blocks_per_row * map->layout_map_count; 3817 #if BITS_PER_LONG == 32 3818 tmpdiv = first_block; 3819 first_group = do_div(tmpdiv, stripesize); 3820 tmpdiv = first_group; 3821 (void) do_div(tmpdiv, r5or6_blocks_per_row); 3822 first_group = tmpdiv; 3823 tmpdiv = last_block; 3824 last_group = do_div(tmpdiv, stripesize); 3825 tmpdiv = last_group; 3826 (void) do_div(tmpdiv, r5or6_blocks_per_row); 3827 last_group = tmpdiv; 3828 #else 3829 first_group = (first_block % stripesize) / r5or6_blocks_per_row; 3830 last_group = (last_block % stripesize) / r5or6_blocks_per_row; 3831 #endif 3832 if (first_group != last_group) 3833 return IO_ACCEL_INELIGIBLE; 3834 3835 /* Verify request is in a single row of RAID 5/6 */ 3836 #if BITS_PER_LONG == 32 3837 tmpdiv = first_block; 3838 (void) do_div(tmpdiv, stripesize); 3839 first_row = r5or6_first_row = r0_first_row = tmpdiv; 3840 tmpdiv = last_block; 3841 (void) do_div(tmpdiv, stripesize); 3842 r5or6_last_row = r0_last_row = tmpdiv; 3843 #else 3844 first_row = r5or6_first_row = r0_first_row = 3845 first_block / stripesize; 3846 r5or6_last_row = r0_last_row = last_block / stripesize; 3847 #endif 3848 if (r5or6_first_row != r5or6_last_row) 3849 return IO_ACCEL_INELIGIBLE; 3850 3851 3852 /* Verify request is in a single column */ 3853 #if BITS_PER_LONG == 32 3854 tmpdiv = first_block; 3855 first_row_offset = do_div(tmpdiv, stripesize); 3856 tmpdiv = first_row_offset; 3857 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row); 3858 r5or6_first_row_offset = first_row_offset; 3859 tmpdiv = last_block; 3860 r5or6_last_row_offset = do_div(tmpdiv, stripesize); 3861 tmpdiv = r5or6_last_row_offset; 3862 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); 3863 tmpdiv = r5or6_first_row_offset; 3864 (void) do_div(tmpdiv, map->strip_size); 3865 first_column = r5or6_first_column = tmpdiv; 3866 tmpdiv = r5or6_last_row_offset; 3867 (void) do_div(tmpdiv, map->strip_size); 3868 r5or6_last_column = tmpdiv; 3869 #else 3870 first_row_offset = r5or6_first_row_offset = 3871 (u32)((first_block % stripesize) % 3872 r5or6_blocks_per_row); 3873 3874 r5or6_last_row_offset = 3875 (u32)((last_block % stripesize) % 3876 r5or6_blocks_per_row); 3877 3878 first_column = r5or6_first_column = 3879 r5or6_first_row_offset / map->strip_size; 3880 r5or6_last_column = 3881 r5or6_last_row_offset / map->strip_size; 3882 #endif 3883 if (r5or6_first_column != r5or6_last_column) 3884 return IO_ACCEL_INELIGIBLE; 3885 3886 /* Request is eligible */ 3887 map_row = ((u32)(first_row >> map->parity_rotation_shift)) % 3888 map->row_cnt; 3889 3890 map_index = (first_group * 3891 (map->row_cnt * total_disks_per_row)) + 3892 (map_row * total_disks_per_row) + first_column; 3893 break; 3894 default: 3895 return IO_ACCEL_INELIGIBLE; 3896 } 3897 3898 disk_handle = dd[map_index].ioaccel_handle; 3899 disk_block = map->disk_starting_blk + (first_row * map->strip_size) + 3900 (first_row_offset - (first_column * map->strip_size)); 3901 disk_block_cnt = block_cnt; 3902 3903 /* handle differing logical/physical block sizes */ 3904 if (map->phys_blk_shift) { 3905 disk_block <<= map->phys_blk_shift; 3906 disk_block_cnt <<= map->phys_blk_shift; 3907 } 3908 BUG_ON(disk_block_cnt > 0xffff); 3909 3910 /* build the new CDB for the physical disk I/O */ 3911 if (disk_block > 0xffffffff) { 3912 cdb[0] = is_write ? WRITE_16 : READ_16; 3913 cdb[1] = 0; 3914 cdb[2] = (u8) (disk_block >> 56); 3915 cdb[3] = (u8) (disk_block >> 48); 3916 cdb[4] = (u8) (disk_block >> 40); 3917 cdb[5] = (u8) (disk_block >> 32); 3918 cdb[6] = (u8) (disk_block >> 24); 3919 cdb[7] = (u8) (disk_block >> 16); 3920 cdb[8] = (u8) (disk_block >> 8); 3921 cdb[9] = (u8) (disk_block); 3922 cdb[10] = (u8) (disk_block_cnt >> 24); 3923 cdb[11] = (u8) (disk_block_cnt >> 16); 3924 cdb[12] = (u8) (disk_block_cnt >> 8); 3925 cdb[13] = (u8) (disk_block_cnt); 3926 cdb[14] = 0; 3927 cdb[15] = 0; 3928 cdb_len = 16; 3929 } else { 3930 cdb[0] = is_write ? WRITE_10 : READ_10; 3931 cdb[1] = 0; 3932 cdb[2] = (u8) (disk_block >> 24); 3933 cdb[3] = (u8) (disk_block >> 16); 3934 cdb[4] = (u8) (disk_block >> 8); 3935 cdb[5] = (u8) (disk_block); 3936 cdb[6] = 0; 3937 cdb[7] = (u8) (disk_block_cnt >> 8); 3938 cdb[8] = (u8) (disk_block_cnt); 3939 cdb[9] = 0; 3940 cdb_len = 10; 3941 } 3942 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len, 3943 dev->scsi3addr); 3944 } 3945 3946 static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, 3947 void (*done)(struct scsi_cmnd *)) 3948 { 3949 struct ctlr_info *h; 3950 struct hpsa_scsi_dev_t *dev; 3951 unsigned char scsi3addr[8]; 3952 struct CommandList *c; 3953 unsigned long flags; 3954 int rc = 0; 3955 3956 /* Get the ptr to our adapter structure out of cmd->host. */ 3957 h = sdev_to_hba(cmd->device); 3958 dev = cmd->device->hostdata; 3959 if (!dev) { 3960 cmd->result = DID_NO_CONNECT << 16; 3961 done(cmd); 3962 return 0; 3963 } 3964 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); 3965 3966 spin_lock_irqsave(&h->lock, flags); 3967 if (unlikely(h->lockup_detected)) { 3968 spin_unlock_irqrestore(&h->lock, flags); 3969 cmd->result = DID_ERROR << 16; 3970 done(cmd); 3971 return 0; 3972 } 3973 spin_unlock_irqrestore(&h->lock, flags); 3974 c = cmd_alloc(h); 3975 if (c == NULL) { /* trouble... */ 3976 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); 3977 return SCSI_MLQUEUE_HOST_BUSY; 3978 } 3979 3980 /* Fill in the command list header */ 3981 3982 cmd->scsi_done = done; /* save this for use by completion code */ 3983 3984 /* save c in case we have to abort it */ 3985 cmd->host_scribble = (unsigned char *) c; 3986 3987 c->cmd_type = CMD_SCSI; 3988 c->scsi_cmd = cmd; 3989 3990 /* Call alternate submit routine for I/O accelerated commands. 3991 * Retries always go down the normal I/O path. 3992 */ 3993 if (likely(cmd->retries == 0 && 3994 cmd->request->cmd_type == REQ_TYPE_FS && 3995 h->acciopath_status)) { 3996 if (dev->offload_enabled) { 3997 rc = hpsa_scsi_ioaccel_raid_map(h, c); 3998 if (rc == 0) 3999 return 0; /* Sent on ioaccel path */ 4000 if (rc < 0) { /* scsi_dma_map failed. */ 4001 cmd_free(h, c); 4002 return SCSI_MLQUEUE_HOST_BUSY; 4003 } 4004 } else if (dev->ioaccel_handle) { 4005 rc = hpsa_scsi_ioaccel_direct_map(h, c); 4006 if (rc == 0) 4007 return 0; /* Sent on direct map path */ 4008 if (rc < 0) { /* scsi_dma_map failed. */ 4009 cmd_free(h, c); 4010 return SCSI_MLQUEUE_HOST_BUSY; 4011 } 4012 } 4013 } 4014 4015 c->Header.ReplyQueue = 0; /* unused in simple mode */ 4016 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); 4017 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT); 4018 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT; 4019 4020 /* Fill in the request block... */ 4021 4022 c->Request.Timeout = 0; 4023 memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); 4024 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); 4025 c->Request.CDBLen = cmd->cmd_len; 4026 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); 4027 c->Request.Type.Type = TYPE_CMD; 4028 c->Request.Type.Attribute = ATTR_SIMPLE; 4029 switch (cmd->sc_data_direction) { 4030 case DMA_TO_DEVICE: 4031 c->Request.Type.Direction = XFER_WRITE; 4032 break; 4033 case DMA_FROM_DEVICE: 4034 c->Request.Type.Direction = XFER_READ; 4035 break; 4036 case DMA_NONE: 4037 c->Request.Type.Direction = XFER_NONE; 4038 break; 4039 case DMA_BIDIRECTIONAL: 4040 /* This can happen if a buggy application does a scsi passthru 4041 * and sets both inlen and outlen to non-zero. ( see 4042 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) 4043 */ 4044 4045 c->Request.Type.Direction = XFER_RSVD; 4046 /* This is technically wrong, and hpsa controllers should 4047 * reject it with CMD_INVALID, which is the most correct 4048 * response, but non-fibre backends appear to let it 4049 * slide by, and give the same results as if this field 4050 * were set correctly. Either way is acceptable for 4051 * our purposes here. 4052 */ 4053 4054 break; 4055 4056 default: 4057 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 4058 cmd->sc_data_direction); 4059 BUG(); 4060 break; 4061 } 4062 4063 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ 4064 cmd_free(h, c); 4065 return SCSI_MLQUEUE_HOST_BUSY; 4066 } 4067 enqueue_cmd_and_start_io(h, c); 4068 /* the cmd'll come back via intr handler in complete_scsi_command() */ 4069 return 0; 4070 } 4071 4072 static DEF_SCSI_QCMD(hpsa_scsi_queue_command) 4073 4074 static int do_not_scan_if_controller_locked_up(struct ctlr_info *h) 4075 { 4076 unsigned long flags; 4077 4078 /* 4079 * Don't let rescans be initiated on a controller known 4080 * to be locked up. If the controller locks up *during* 4081 * a rescan, that thread is probably hosed, but at least 4082 * we can prevent new rescan threads from piling up on a 4083 * locked up controller. 4084 */ 4085 spin_lock_irqsave(&h->lock, flags); 4086 if (unlikely(h->lockup_detected)) { 4087 spin_unlock_irqrestore(&h->lock, flags); 4088 spin_lock_irqsave(&h->scan_lock, flags); 4089 h->scan_finished = 1; 4090 wake_up_all(&h->scan_wait_queue); 4091 spin_unlock_irqrestore(&h->scan_lock, flags); 4092 return 1; 4093 } 4094 spin_unlock_irqrestore(&h->lock, flags); 4095 return 0; 4096 } 4097 4098 static void hpsa_scan_start(struct Scsi_Host *sh) 4099 { 4100 struct ctlr_info *h = shost_to_hba(sh); 4101 unsigned long flags; 4102 4103 if (do_not_scan_if_controller_locked_up(h)) 4104 return; 4105 4106 /* wait until any scan already in progress is finished. */ 4107 while (1) { 4108 spin_lock_irqsave(&h->scan_lock, flags); 4109 if (h->scan_finished) 4110 break; 4111 spin_unlock_irqrestore(&h->scan_lock, flags); 4112 wait_event(h->scan_wait_queue, h->scan_finished); 4113 /* Note: We don't need to worry about a race between this 4114 * thread and driver unload because the midlayer will 4115 * have incremented the reference count, so unload won't 4116 * happen if we're in here. 4117 */ 4118 } 4119 h->scan_finished = 0; /* mark scan as in progress */ 4120 spin_unlock_irqrestore(&h->scan_lock, flags); 4121 4122 if (do_not_scan_if_controller_locked_up(h)) 4123 return; 4124 4125 hpsa_update_scsi_devices(h, h->scsi_host->host_no); 4126 4127 spin_lock_irqsave(&h->scan_lock, flags); 4128 h->scan_finished = 1; /* mark scan as finished. */ 4129 wake_up_all(&h->scan_wait_queue); 4130 spin_unlock_irqrestore(&h->scan_lock, flags); 4131 } 4132 4133 static int hpsa_scan_finished(struct Scsi_Host *sh, 4134 unsigned long elapsed_time) 4135 { 4136 struct ctlr_info *h = shost_to_hba(sh); 4137 unsigned long flags; 4138 int finished; 4139 4140 spin_lock_irqsave(&h->scan_lock, flags); 4141 finished = h->scan_finished; 4142 spin_unlock_irqrestore(&h->scan_lock, flags); 4143 return finished; 4144 } 4145 4146 static int hpsa_change_queue_depth(struct scsi_device *sdev, 4147 int qdepth, int reason) 4148 { 4149 struct ctlr_info *h = sdev_to_hba(sdev); 4150 4151 if (reason != SCSI_QDEPTH_DEFAULT) 4152 return -ENOTSUPP; 4153 4154 if (qdepth < 1) 4155 qdepth = 1; 4156 else 4157 if (qdepth > h->nr_cmds) 4158 qdepth = h->nr_cmds; 4159 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 4160 return sdev->queue_depth; 4161 } 4162 4163 static void hpsa_unregister_scsi(struct ctlr_info *h) 4164 { 4165 /* we are being forcibly unloaded, and may not refuse. */ 4166 scsi_remove_host(h->scsi_host); 4167 scsi_host_put(h->scsi_host); 4168 h->scsi_host = NULL; 4169 } 4170 4171 static int hpsa_register_scsi(struct ctlr_info *h) 4172 { 4173 struct Scsi_Host *sh; 4174 int error; 4175 4176 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); 4177 if (sh == NULL) 4178 goto fail; 4179 4180 sh->io_port = 0; 4181 sh->n_io_port = 0; 4182 sh->this_id = -1; 4183 sh->max_channel = 3; 4184 sh->max_cmd_len = MAX_COMMAND_SIZE; 4185 sh->max_lun = HPSA_MAX_LUN; 4186 sh->max_id = HPSA_MAX_LUN; 4187 sh->can_queue = h->nr_cmds; 4188 if (h->hba_mode_enabled) 4189 sh->cmd_per_lun = 7; 4190 else 4191 sh->cmd_per_lun = h->nr_cmds; 4192 sh->sg_tablesize = h->maxsgentries; 4193 h->scsi_host = sh; 4194 sh->hostdata[0] = (unsigned long) h; 4195 sh->irq = h->intr[h->intr_mode]; 4196 sh->unique_id = sh->irq; 4197 error = scsi_add_host(sh, &h->pdev->dev); 4198 if (error) 4199 goto fail_host_put; 4200 scsi_scan_host(sh); 4201 return 0; 4202 4203 fail_host_put: 4204 dev_err(&h->pdev->dev, "%s: scsi_add_host" 4205 " failed for controller %d\n", __func__, h->ctlr); 4206 scsi_host_put(sh); 4207 return error; 4208 fail: 4209 dev_err(&h->pdev->dev, "%s: scsi_host_alloc" 4210 " failed for controller %d\n", __func__, h->ctlr); 4211 return -ENOMEM; 4212 } 4213 4214 static int wait_for_device_to_become_ready(struct ctlr_info *h, 4215 unsigned char lunaddr[]) 4216 { 4217 int rc; 4218 int count = 0; 4219 int waittime = 1; /* seconds */ 4220 struct CommandList *c; 4221 4222 c = cmd_special_alloc(h); 4223 if (!c) { 4224 dev_warn(&h->pdev->dev, "out of memory in " 4225 "wait_for_device_to_become_ready.\n"); 4226 return IO_ERROR; 4227 } 4228 4229 /* Send test unit ready until device ready, or give up. */ 4230 while (count < HPSA_TUR_RETRY_LIMIT) { 4231 4232 /* Wait for a bit. do this first, because if we send 4233 * the TUR right away, the reset will just abort it. 4234 */ 4235 msleep(1000 * waittime); 4236 count++; 4237 rc = 0; /* Device ready. */ 4238 4239 /* Increase wait time with each try, up to a point. */ 4240 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) 4241 waittime = waittime * 2; 4242 4243 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ 4244 (void) fill_cmd(c, TEST_UNIT_READY, h, 4245 NULL, 0, 0, lunaddr, TYPE_CMD); 4246 hpsa_scsi_do_simple_cmd_core(h, c); 4247 /* no unmap needed here because no data xfer. */ 4248 4249 if (c->err_info->CommandStatus == CMD_SUCCESS) 4250 break; 4251 4252 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 4253 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && 4254 (c->err_info->SenseInfo[2] == NO_SENSE || 4255 c->err_info->SenseInfo[2] == UNIT_ATTENTION)) 4256 break; 4257 4258 dev_warn(&h->pdev->dev, "waiting %d secs " 4259 "for device to become ready.\n", waittime); 4260 rc = 1; /* device not ready. */ 4261 } 4262 4263 if (rc) 4264 dev_warn(&h->pdev->dev, "giving up on device.\n"); 4265 else 4266 dev_warn(&h->pdev->dev, "device is ready.\n"); 4267 4268 cmd_special_free(h, c); 4269 return rc; 4270 } 4271 4272 /* Need at least one of these error handlers to keep ../scsi/hosts.c from 4273 * complaining. Doing a host- or bus-reset can't do anything good here. 4274 */ 4275 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) 4276 { 4277 int rc; 4278 struct ctlr_info *h; 4279 struct hpsa_scsi_dev_t *dev; 4280 4281 /* find the controller to which the command to be aborted was sent */ 4282 h = sdev_to_hba(scsicmd->device); 4283 if (h == NULL) /* paranoia */ 4284 return FAILED; 4285 dev = scsicmd->device->hostdata; 4286 if (!dev) { 4287 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " 4288 "device lookup failed.\n"); 4289 return FAILED; 4290 } 4291 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n", 4292 h->scsi_host->host_no, dev->bus, dev->target, dev->lun); 4293 /* send a reset to the SCSI LUN which the command was sent to */ 4294 rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN); 4295 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) 4296 return SUCCESS; 4297 4298 dev_warn(&h->pdev->dev, "resetting device failed.\n"); 4299 return FAILED; 4300 } 4301 4302 static void swizzle_abort_tag(u8 *tag) 4303 { 4304 u8 original_tag[8]; 4305 4306 memcpy(original_tag, tag, 8); 4307 tag[0] = original_tag[3]; 4308 tag[1] = original_tag[2]; 4309 tag[2] = original_tag[1]; 4310 tag[3] = original_tag[0]; 4311 tag[4] = original_tag[7]; 4312 tag[5] = original_tag[6]; 4313 tag[6] = original_tag[5]; 4314 tag[7] = original_tag[4]; 4315 } 4316 4317 static void hpsa_get_tag(struct ctlr_info *h, 4318 struct CommandList *c, u32 *taglower, u32 *tagupper) 4319 { 4320 if (c->cmd_type == CMD_IOACCEL1) { 4321 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *) 4322 &h->ioaccel_cmd_pool[c->cmdindex]; 4323 *tagupper = cm1->Tag.upper; 4324 *taglower = cm1->Tag.lower; 4325 return; 4326 } 4327 if (c->cmd_type == CMD_IOACCEL2) { 4328 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *) 4329 &h->ioaccel2_cmd_pool[c->cmdindex]; 4330 /* upper tag not used in ioaccel2 mode */ 4331 memset(tagupper, 0, sizeof(*tagupper)); 4332 *taglower = cm2->Tag; 4333 return; 4334 } 4335 *tagupper = c->Header.Tag.upper; 4336 *taglower = c->Header.Tag.lower; 4337 } 4338 4339 4340 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, 4341 struct CommandList *abort, int swizzle) 4342 { 4343 int rc = IO_OK; 4344 struct CommandList *c; 4345 struct ErrorInfo *ei; 4346 u32 tagupper, taglower; 4347 4348 c = cmd_special_alloc(h); 4349 if (c == NULL) { /* trouble... */ 4350 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 4351 return -ENOMEM; 4352 } 4353 4354 /* fill_cmd can't fail here, no buffer to map */ 4355 (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort, 4356 0, 0, scsi3addr, TYPE_MSG); 4357 if (swizzle) 4358 swizzle_abort_tag(&c->Request.CDB[4]); 4359 hpsa_scsi_do_simple_cmd_core(h, c); 4360 hpsa_get_tag(h, abort, &taglower, &tagupper); 4361 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n", 4362 __func__, tagupper, taglower); 4363 /* no unmap needed here because no data xfer. */ 4364 4365 ei = c->err_info; 4366 switch (ei->CommandStatus) { 4367 case CMD_SUCCESS: 4368 break; 4369 case CMD_UNABORTABLE: /* Very common, don't make noise. */ 4370 rc = -1; 4371 break; 4372 default: 4373 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n", 4374 __func__, tagupper, taglower); 4375 hpsa_scsi_interpret_error(h, c); 4376 rc = -1; 4377 break; 4378 } 4379 cmd_special_free(h, c); 4380 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", 4381 __func__, tagupper, taglower); 4382 return rc; 4383 } 4384 4385 /* 4386 * hpsa_find_cmd_in_queue 4387 * 4388 * Used to determine whether a command (find) is still present 4389 * in queue_head. Optionally excludes the last element of queue_head. 4390 * 4391 * This is used to avoid unnecessary aborts. Commands in h->reqQ have 4392 * not yet been submitted, and so can be aborted by the driver without 4393 * sending an abort to the hardware. 4394 * 4395 * Returns pointer to command if found in queue, NULL otherwise. 4396 */ 4397 static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h, 4398 struct scsi_cmnd *find, struct list_head *queue_head) 4399 { 4400 unsigned long flags; 4401 struct CommandList *c = NULL; /* ptr into cmpQ */ 4402 4403 if (!find) 4404 return 0; 4405 spin_lock_irqsave(&h->lock, flags); 4406 list_for_each_entry(c, queue_head, list) { 4407 if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */ 4408 continue; 4409 if (c->scsi_cmd == find) { 4410 spin_unlock_irqrestore(&h->lock, flags); 4411 return c; 4412 } 4413 } 4414 spin_unlock_irqrestore(&h->lock, flags); 4415 return NULL; 4416 } 4417 4418 static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h, 4419 u8 *tag, struct list_head *queue_head) 4420 { 4421 unsigned long flags; 4422 struct CommandList *c; 4423 4424 spin_lock_irqsave(&h->lock, flags); 4425 list_for_each_entry(c, queue_head, list) { 4426 if (memcmp(&c->Header.Tag, tag, 8) != 0) 4427 continue; 4428 spin_unlock_irqrestore(&h->lock, flags); 4429 return c; 4430 } 4431 spin_unlock_irqrestore(&h->lock, flags); 4432 return NULL; 4433 } 4434 4435 /* ioaccel2 path firmware cannot handle abort task requests. 4436 * Change abort requests to physical target reset, and send to the 4437 * address of the physical disk used for the ioaccel 2 command. 4438 * Return 0 on success (IO_OK) 4439 * -1 on failure 4440 */ 4441 4442 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, 4443 unsigned char *scsi3addr, struct CommandList *abort) 4444 { 4445 int rc = IO_OK; 4446 struct scsi_cmnd *scmd; /* scsi command within request being aborted */ 4447 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */ 4448 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */ 4449 unsigned char *psa = &phys_scsi3addr[0]; 4450 4451 /* Get a pointer to the hpsa logical device. */ 4452 scmd = (struct scsi_cmnd *) abort->scsi_cmd; 4453 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata); 4454 if (dev == NULL) { 4455 dev_warn(&h->pdev->dev, 4456 "Cannot abort: no device pointer for command.\n"); 4457 return -1; /* not abortable */ 4458 } 4459 4460 if (h->raid_offload_debug > 0) 4461 dev_info(&h->pdev->dev, 4462 "Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4463 h->scsi_host->host_no, dev->bus, dev->target, dev->lun, 4464 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3], 4465 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); 4466 4467 if (!dev->offload_enabled) { 4468 dev_warn(&h->pdev->dev, 4469 "Can't abort: device is not operating in HP SSD Smart Path mode.\n"); 4470 return -1; /* not abortable */ 4471 } 4472 4473 /* Incoming scsi3addr is logical addr. We need physical disk addr. */ 4474 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) { 4475 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n"); 4476 return -1; /* not abortable */ 4477 } 4478 4479 /* send the reset */ 4480 if (h->raid_offload_debug > 0) 4481 dev_info(&h->pdev->dev, 4482 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4483 psa[0], psa[1], psa[2], psa[3], 4484 psa[4], psa[5], psa[6], psa[7]); 4485 rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET); 4486 if (rc != 0) { 4487 dev_warn(&h->pdev->dev, 4488 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4489 psa[0], psa[1], psa[2], psa[3], 4490 psa[4], psa[5], psa[6], psa[7]); 4491 return rc; /* failed to reset */ 4492 } 4493 4494 /* wait for device to recover */ 4495 if (wait_for_device_to_become_ready(h, psa) != 0) { 4496 dev_warn(&h->pdev->dev, 4497 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4498 psa[0], psa[1], psa[2], psa[3], 4499 psa[4], psa[5], psa[6], psa[7]); 4500 return -1; /* failed to recover */ 4501 } 4502 4503 /* device recovered */ 4504 dev_info(&h->pdev->dev, 4505 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4506 psa[0], psa[1], psa[2], psa[3], 4507 psa[4], psa[5], psa[6], psa[7]); 4508 4509 return rc; /* success */ 4510 } 4511 4512 /* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to 4513 * tell which kind we're dealing with, so we send the abort both ways. There 4514 * shouldn't be any collisions between swizzled and unswizzled tags due to the 4515 * way we construct our tags but we check anyway in case the assumptions which 4516 * make this true someday become false. 4517 */ 4518 static int hpsa_send_abort_both_ways(struct ctlr_info *h, 4519 unsigned char *scsi3addr, struct CommandList *abort) 4520 { 4521 u8 swizzled_tag[8]; 4522 struct CommandList *c; 4523 int rc = 0, rc2 = 0; 4524 4525 /* ioccelerator mode 2 commands should be aborted via the 4526 * accelerated path, since RAID path is unaware of these commands, 4527 * but underlying firmware can't handle abort TMF. 4528 * Change abort to physical device reset. 4529 */ 4530 if (abort->cmd_type == CMD_IOACCEL2) 4531 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort); 4532 4533 /* we do not expect to find the swizzled tag in our queue, but 4534 * check anyway just to be sure the assumptions which make this 4535 * the case haven't become wrong. 4536 */ 4537 memcpy(swizzled_tag, &abort->Request.CDB[4], 8); 4538 swizzle_abort_tag(swizzled_tag); 4539 c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ); 4540 if (c != NULL) { 4541 dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n"); 4542 return hpsa_send_abort(h, scsi3addr, abort, 0); 4543 } 4544 rc = hpsa_send_abort(h, scsi3addr, abort, 0); 4545 4546 /* if the command is still in our queue, we can't conclude that it was 4547 * aborted (it might have just completed normally) but in any case 4548 * we don't need to try to abort it another way. 4549 */ 4550 c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ); 4551 if (c) 4552 rc2 = hpsa_send_abort(h, scsi3addr, abort, 1); 4553 return rc && rc2; 4554 } 4555 4556 /* Send an abort for the specified command. 4557 * If the device and controller support it, 4558 * send a task abort request. 4559 */ 4560 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) 4561 { 4562 4563 int i, rc; 4564 struct ctlr_info *h; 4565 struct hpsa_scsi_dev_t *dev; 4566 struct CommandList *abort; /* pointer to command to be aborted */ 4567 struct CommandList *found; 4568 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */ 4569 char msg[256]; /* For debug messaging. */ 4570 int ml = 0; 4571 u32 tagupper, taglower; 4572 4573 /* Find the controller of the command to be aborted */ 4574 h = sdev_to_hba(sc->device); 4575 if (WARN(h == NULL, 4576 "ABORT REQUEST FAILED, Controller lookup failed.\n")) 4577 return FAILED; 4578 4579 /* Check that controller supports some kind of task abort */ 4580 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) && 4581 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) 4582 return FAILED; 4583 4584 memset(msg, 0, sizeof(msg)); 4585 ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%d ", 4586 h->scsi_host->host_no, sc->device->channel, 4587 sc->device->id, sc->device->lun); 4588 4589 /* Find the device of the command to be aborted */ 4590 dev = sc->device->hostdata; 4591 if (!dev) { 4592 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n", 4593 msg); 4594 return FAILED; 4595 } 4596 4597 /* Get SCSI command to be aborted */ 4598 abort = (struct CommandList *) sc->host_scribble; 4599 if (abort == NULL) { 4600 dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n", 4601 msg); 4602 return FAILED; 4603 } 4604 hpsa_get_tag(h, abort, &taglower, &tagupper); 4605 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower); 4606 as = (struct scsi_cmnd *) abort->scsi_cmd; 4607 if (as != NULL) 4608 ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ", 4609 as->cmnd[0], as->serial_number); 4610 dev_dbg(&h->pdev->dev, "%s\n", msg); 4611 dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n", 4612 h->scsi_host->host_no, dev->bus, dev->target, dev->lun); 4613 4614 /* Search reqQ to See if command is queued but not submitted, 4615 * if so, complete the command with aborted status and remove 4616 * it from the reqQ. 4617 */ 4618 found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ); 4619 if (found) { 4620 found->err_info->CommandStatus = CMD_ABORTED; 4621 finish_cmd(found); 4622 dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n", 4623 msg); 4624 return SUCCESS; 4625 } 4626 4627 /* not in reqQ, if also not in cmpQ, must have already completed */ 4628 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ); 4629 if (!found) { 4630 dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n", 4631 msg); 4632 return SUCCESS; 4633 } 4634 4635 /* 4636 * Command is in flight, or possibly already completed 4637 * by the firmware (but not to the scsi mid layer) but we can't 4638 * distinguish which. Send the abort down. 4639 */ 4640 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort); 4641 if (rc != 0) { 4642 dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg); 4643 dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n", 4644 h->scsi_host->host_no, 4645 dev->bus, dev->target, dev->lun); 4646 return FAILED; 4647 } 4648 dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg); 4649 4650 /* If the abort(s) above completed and actually aborted the 4651 * command, then the command to be aborted should already be 4652 * completed. If not, wait around a bit more to see if they 4653 * manage to complete normally. 4654 */ 4655 #define ABORT_COMPLETE_WAIT_SECS 30 4656 for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) { 4657 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ); 4658 if (!found) 4659 return SUCCESS; 4660 msleep(100); 4661 } 4662 dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n", 4663 msg, ABORT_COMPLETE_WAIT_SECS); 4664 return FAILED; 4665 } 4666 4667 4668 /* 4669 * For operations that cannot sleep, a command block is allocated at init, 4670 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track 4671 * which ones are free or in use. Lock must be held when calling this. 4672 * cmd_free() is the complement. 4673 */ 4674 static struct CommandList *cmd_alloc(struct ctlr_info *h) 4675 { 4676 struct CommandList *c; 4677 int i; 4678 union u64bit temp64; 4679 dma_addr_t cmd_dma_handle, err_dma_handle; 4680 unsigned long flags; 4681 4682 spin_lock_irqsave(&h->lock, flags); 4683 do { 4684 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); 4685 if (i == h->nr_cmds) { 4686 spin_unlock_irqrestore(&h->lock, flags); 4687 return NULL; 4688 } 4689 } while (test_and_set_bit 4690 (i & (BITS_PER_LONG - 1), 4691 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); 4692 spin_unlock_irqrestore(&h->lock, flags); 4693 4694 c = h->cmd_pool + i; 4695 memset(c, 0, sizeof(*c)); 4696 cmd_dma_handle = h->cmd_pool_dhandle 4697 + i * sizeof(*c); 4698 c->err_info = h->errinfo_pool + i; 4699 memset(c->err_info, 0, sizeof(*c->err_info)); 4700 err_dma_handle = h->errinfo_pool_dhandle 4701 + i * sizeof(*c->err_info); 4702 4703 c->cmdindex = i; 4704 4705 INIT_LIST_HEAD(&c->list); 4706 c->busaddr = (u32) cmd_dma_handle; 4707 temp64.val = (u64) err_dma_handle; 4708 c->ErrDesc.Addr.lower = temp64.val32.lower; 4709 c->ErrDesc.Addr.upper = temp64.val32.upper; 4710 c->ErrDesc.Len = sizeof(*c->err_info); 4711 4712 c->h = h; 4713 return c; 4714 } 4715 4716 /* For operations that can wait for kmalloc to possibly sleep, 4717 * this routine can be called. Lock need not be held to call 4718 * cmd_special_alloc. cmd_special_free() is the complement. 4719 */ 4720 static struct CommandList *cmd_special_alloc(struct ctlr_info *h) 4721 { 4722 struct CommandList *c; 4723 union u64bit temp64; 4724 dma_addr_t cmd_dma_handle, err_dma_handle; 4725 4726 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle); 4727 if (c == NULL) 4728 return NULL; 4729 memset(c, 0, sizeof(*c)); 4730 4731 c->cmd_type = CMD_SCSI; 4732 c->cmdindex = -1; 4733 4734 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info), 4735 &err_dma_handle); 4736 4737 if (c->err_info == NULL) { 4738 pci_free_consistent(h->pdev, 4739 sizeof(*c), c, cmd_dma_handle); 4740 return NULL; 4741 } 4742 memset(c->err_info, 0, sizeof(*c->err_info)); 4743 4744 INIT_LIST_HEAD(&c->list); 4745 c->busaddr = (u32) cmd_dma_handle; 4746 temp64.val = (u64) err_dma_handle; 4747 c->ErrDesc.Addr.lower = temp64.val32.lower; 4748 c->ErrDesc.Addr.upper = temp64.val32.upper; 4749 c->ErrDesc.Len = sizeof(*c->err_info); 4750 4751 c->h = h; 4752 return c; 4753 } 4754 4755 static void cmd_free(struct ctlr_info *h, struct CommandList *c) 4756 { 4757 int i; 4758 unsigned long flags; 4759 4760 i = c - h->cmd_pool; 4761 spin_lock_irqsave(&h->lock, flags); 4762 clear_bit(i & (BITS_PER_LONG - 1), 4763 h->cmd_pool_bits + (i / BITS_PER_LONG)); 4764 spin_unlock_irqrestore(&h->lock, flags); 4765 } 4766 4767 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c) 4768 { 4769 union u64bit temp64; 4770 4771 temp64.val32.lower = c->ErrDesc.Addr.lower; 4772 temp64.val32.upper = c->ErrDesc.Addr.upper; 4773 pci_free_consistent(h->pdev, sizeof(*c->err_info), 4774 c->err_info, (dma_addr_t) temp64.val); 4775 pci_free_consistent(h->pdev, sizeof(*c), 4776 c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK)); 4777 } 4778 4779 #ifdef CONFIG_COMPAT 4780 4781 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) 4782 { 4783 IOCTL32_Command_struct __user *arg32 = 4784 (IOCTL32_Command_struct __user *) arg; 4785 IOCTL_Command_struct arg64; 4786 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); 4787 int err; 4788 u32 cp; 4789 4790 memset(&arg64, 0, sizeof(arg64)); 4791 err = 0; 4792 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 4793 sizeof(arg64.LUN_info)); 4794 err |= copy_from_user(&arg64.Request, &arg32->Request, 4795 sizeof(arg64.Request)); 4796 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 4797 sizeof(arg64.error_info)); 4798 err |= get_user(arg64.buf_size, &arg32->buf_size); 4799 err |= get_user(cp, &arg32->buf); 4800 arg64.buf = compat_ptr(cp); 4801 err |= copy_to_user(p, &arg64, sizeof(arg64)); 4802 4803 if (err) 4804 return -EFAULT; 4805 4806 err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p); 4807 if (err) 4808 return err; 4809 err |= copy_in_user(&arg32->error_info, &p->error_info, 4810 sizeof(arg32->error_info)); 4811 if (err) 4812 return -EFAULT; 4813 return err; 4814 } 4815 4816 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, 4817 int cmd, void *arg) 4818 { 4819 BIG_IOCTL32_Command_struct __user *arg32 = 4820 (BIG_IOCTL32_Command_struct __user *) arg; 4821 BIG_IOCTL_Command_struct arg64; 4822 BIG_IOCTL_Command_struct __user *p = 4823 compat_alloc_user_space(sizeof(arg64)); 4824 int err; 4825 u32 cp; 4826 4827 memset(&arg64, 0, sizeof(arg64)); 4828 err = 0; 4829 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 4830 sizeof(arg64.LUN_info)); 4831 err |= copy_from_user(&arg64.Request, &arg32->Request, 4832 sizeof(arg64.Request)); 4833 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 4834 sizeof(arg64.error_info)); 4835 err |= get_user(arg64.buf_size, &arg32->buf_size); 4836 err |= get_user(arg64.malloc_size, &arg32->malloc_size); 4837 err |= get_user(cp, &arg32->buf); 4838 arg64.buf = compat_ptr(cp); 4839 err |= copy_to_user(p, &arg64, sizeof(arg64)); 4840 4841 if (err) 4842 return -EFAULT; 4843 4844 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p); 4845 if (err) 4846 return err; 4847 err |= copy_in_user(&arg32->error_info, &p->error_info, 4848 sizeof(arg32->error_info)); 4849 if (err) 4850 return -EFAULT; 4851 return err; 4852 } 4853 4854 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg) 4855 { 4856 switch (cmd) { 4857 case CCISS_GETPCIINFO: 4858 case CCISS_GETINTINFO: 4859 case CCISS_SETINTINFO: 4860 case CCISS_GETNODENAME: 4861 case CCISS_SETNODENAME: 4862 case CCISS_GETHEARTBEAT: 4863 case CCISS_GETBUSTYPES: 4864 case CCISS_GETFIRMVER: 4865 case CCISS_GETDRIVVER: 4866 case CCISS_REVALIDVOLS: 4867 case CCISS_DEREGDISK: 4868 case CCISS_REGNEWDISK: 4869 case CCISS_REGNEWD: 4870 case CCISS_RESCANDISK: 4871 case CCISS_GETLUNINFO: 4872 return hpsa_ioctl(dev, cmd, arg); 4873 4874 case CCISS_PASSTHRU32: 4875 return hpsa_ioctl32_passthru(dev, cmd, arg); 4876 case CCISS_BIG_PASSTHRU32: 4877 return hpsa_ioctl32_big_passthru(dev, cmd, arg); 4878 4879 default: 4880 return -ENOIOCTLCMD; 4881 } 4882 } 4883 #endif 4884 4885 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) 4886 { 4887 struct hpsa_pci_info pciinfo; 4888 4889 if (!argp) 4890 return -EINVAL; 4891 pciinfo.domain = pci_domain_nr(h->pdev->bus); 4892 pciinfo.bus = h->pdev->bus->number; 4893 pciinfo.dev_fn = h->pdev->devfn; 4894 pciinfo.board_id = h->board_id; 4895 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo))) 4896 return -EFAULT; 4897 return 0; 4898 } 4899 4900 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) 4901 { 4902 DriverVer_type DriverVer; 4903 unsigned char vmaj, vmin, vsubmin; 4904 int rc; 4905 4906 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu", 4907 &vmaj, &vmin, &vsubmin); 4908 if (rc != 3) { 4909 dev_info(&h->pdev->dev, "driver version string '%s' " 4910 "unrecognized.", HPSA_DRIVER_VERSION); 4911 vmaj = 0; 4912 vmin = 0; 4913 vsubmin = 0; 4914 } 4915 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin; 4916 if (!argp) 4917 return -EINVAL; 4918 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type))) 4919 return -EFAULT; 4920 return 0; 4921 } 4922 4923 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) 4924 { 4925 IOCTL_Command_struct iocommand; 4926 struct CommandList *c; 4927 char *buff = NULL; 4928 union u64bit temp64; 4929 int rc = 0; 4930 4931 if (!argp) 4932 return -EINVAL; 4933 if (!capable(CAP_SYS_RAWIO)) 4934 return -EPERM; 4935 if (copy_from_user(&iocommand, argp, sizeof(iocommand))) 4936 return -EFAULT; 4937 if ((iocommand.buf_size < 1) && 4938 (iocommand.Request.Type.Direction != XFER_NONE)) { 4939 return -EINVAL; 4940 } 4941 if (iocommand.buf_size > 0) { 4942 buff = kmalloc(iocommand.buf_size, GFP_KERNEL); 4943 if (buff == NULL) 4944 return -EFAULT; 4945 if (iocommand.Request.Type.Direction == XFER_WRITE) { 4946 /* Copy the data into the buffer we created */ 4947 if (copy_from_user(buff, iocommand.buf, 4948 iocommand.buf_size)) { 4949 rc = -EFAULT; 4950 goto out_kfree; 4951 } 4952 } else { 4953 memset(buff, 0, iocommand.buf_size); 4954 } 4955 } 4956 c = cmd_special_alloc(h); 4957 if (c == NULL) { 4958 rc = -ENOMEM; 4959 goto out_kfree; 4960 } 4961 /* Fill in the command type */ 4962 c->cmd_type = CMD_IOCTL_PEND; 4963 /* Fill in Command Header */ 4964 c->Header.ReplyQueue = 0; /* unused in simple mode */ 4965 if (iocommand.buf_size > 0) { /* buffer to fill */ 4966 c->Header.SGList = 1; 4967 c->Header.SGTotal = 1; 4968 } else { /* no buffers to fill */ 4969 c->Header.SGList = 0; 4970 c->Header.SGTotal = 0; 4971 } 4972 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); 4973 /* use the kernel address the cmd block for tag */ 4974 c->Header.Tag.lower = c->busaddr; 4975 4976 /* Fill in Request block */ 4977 memcpy(&c->Request, &iocommand.Request, 4978 sizeof(c->Request)); 4979 4980 /* Fill in the scatter gather information */ 4981 if (iocommand.buf_size > 0) { 4982 temp64.val = pci_map_single(h->pdev, buff, 4983 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); 4984 if (dma_mapping_error(&h->pdev->dev, temp64.val)) { 4985 c->SG[0].Addr.lower = 0; 4986 c->SG[0].Addr.upper = 0; 4987 c->SG[0].Len = 0; 4988 rc = -ENOMEM; 4989 goto out; 4990 } 4991 c->SG[0].Addr.lower = temp64.val32.lower; 4992 c->SG[0].Addr.upper = temp64.val32.upper; 4993 c->SG[0].Len = iocommand.buf_size; 4994 c->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining*/ 4995 } 4996 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); 4997 if (iocommand.buf_size > 0) 4998 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); 4999 check_ioctl_unit_attention(h, c); 5000 5001 /* Copy the error information out */ 5002 memcpy(&iocommand.error_info, c->err_info, 5003 sizeof(iocommand.error_info)); 5004 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) { 5005 rc = -EFAULT; 5006 goto out; 5007 } 5008 if (iocommand.Request.Type.Direction == XFER_READ && 5009 iocommand.buf_size > 0) { 5010 /* Copy the data out of the buffer we created */ 5011 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { 5012 rc = -EFAULT; 5013 goto out; 5014 } 5015 } 5016 out: 5017 cmd_special_free(h, c); 5018 out_kfree: 5019 kfree(buff); 5020 return rc; 5021 } 5022 5023 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) 5024 { 5025 BIG_IOCTL_Command_struct *ioc; 5026 struct CommandList *c; 5027 unsigned char **buff = NULL; 5028 int *buff_size = NULL; 5029 union u64bit temp64; 5030 BYTE sg_used = 0; 5031 int status = 0; 5032 int i; 5033 u32 left; 5034 u32 sz; 5035 BYTE __user *data_ptr; 5036 5037 if (!argp) 5038 return -EINVAL; 5039 if (!capable(CAP_SYS_RAWIO)) 5040 return -EPERM; 5041 ioc = (BIG_IOCTL_Command_struct *) 5042 kmalloc(sizeof(*ioc), GFP_KERNEL); 5043 if (!ioc) { 5044 status = -ENOMEM; 5045 goto cleanup1; 5046 } 5047 if (copy_from_user(ioc, argp, sizeof(*ioc))) { 5048 status = -EFAULT; 5049 goto cleanup1; 5050 } 5051 if ((ioc->buf_size < 1) && 5052 (ioc->Request.Type.Direction != XFER_NONE)) { 5053 status = -EINVAL; 5054 goto cleanup1; 5055 } 5056 /* Check kmalloc limits using all SGs */ 5057 if (ioc->malloc_size > MAX_KMALLOC_SIZE) { 5058 status = -EINVAL; 5059 goto cleanup1; 5060 } 5061 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) { 5062 status = -EINVAL; 5063 goto cleanup1; 5064 } 5065 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL); 5066 if (!buff) { 5067 status = -ENOMEM; 5068 goto cleanup1; 5069 } 5070 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL); 5071 if (!buff_size) { 5072 status = -ENOMEM; 5073 goto cleanup1; 5074 } 5075 left = ioc->buf_size; 5076 data_ptr = ioc->buf; 5077 while (left) { 5078 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left; 5079 buff_size[sg_used] = sz; 5080 buff[sg_used] = kmalloc(sz, GFP_KERNEL); 5081 if (buff[sg_used] == NULL) { 5082 status = -ENOMEM; 5083 goto cleanup1; 5084 } 5085 if (ioc->Request.Type.Direction == XFER_WRITE) { 5086 if (copy_from_user(buff[sg_used], data_ptr, sz)) { 5087 status = -ENOMEM; 5088 goto cleanup1; 5089 } 5090 } else 5091 memset(buff[sg_used], 0, sz); 5092 left -= sz; 5093 data_ptr += sz; 5094 sg_used++; 5095 } 5096 c = cmd_special_alloc(h); 5097 if (c == NULL) { 5098 status = -ENOMEM; 5099 goto cleanup1; 5100 } 5101 c->cmd_type = CMD_IOCTL_PEND; 5102 c->Header.ReplyQueue = 0; 5103 c->Header.SGList = c->Header.SGTotal = sg_used; 5104 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); 5105 c->Header.Tag.lower = c->busaddr; 5106 memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); 5107 if (ioc->buf_size > 0) { 5108 int i; 5109 for (i = 0; i < sg_used; i++) { 5110 temp64.val = pci_map_single(h->pdev, buff[i], 5111 buff_size[i], PCI_DMA_BIDIRECTIONAL); 5112 if (dma_mapping_error(&h->pdev->dev, temp64.val)) { 5113 c->SG[i].Addr.lower = 0; 5114 c->SG[i].Addr.upper = 0; 5115 c->SG[i].Len = 0; 5116 hpsa_pci_unmap(h->pdev, c, i, 5117 PCI_DMA_BIDIRECTIONAL); 5118 status = -ENOMEM; 5119 goto cleanup0; 5120 } 5121 c->SG[i].Addr.lower = temp64.val32.lower; 5122 c->SG[i].Addr.upper = temp64.val32.upper; 5123 c->SG[i].Len = buff_size[i]; 5124 c->SG[i].Ext = i < sg_used - 1 ? 0 : HPSA_SG_LAST; 5125 } 5126 } 5127 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); 5128 if (sg_used) 5129 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); 5130 check_ioctl_unit_attention(h, c); 5131 /* Copy the error information out */ 5132 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); 5133 if (copy_to_user(argp, ioc, sizeof(*ioc))) { 5134 status = -EFAULT; 5135 goto cleanup0; 5136 } 5137 if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) { 5138 /* Copy the data out of the buffer we created */ 5139 BYTE __user *ptr = ioc->buf; 5140 for (i = 0; i < sg_used; i++) { 5141 if (copy_to_user(ptr, buff[i], buff_size[i])) { 5142 status = -EFAULT; 5143 goto cleanup0; 5144 } 5145 ptr += buff_size[i]; 5146 } 5147 } 5148 status = 0; 5149 cleanup0: 5150 cmd_special_free(h, c); 5151 cleanup1: 5152 if (buff) { 5153 for (i = 0; i < sg_used; i++) 5154 kfree(buff[i]); 5155 kfree(buff); 5156 } 5157 kfree(buff_size); 5158 kfree(ioc); 5159 return status; 5160 } 5161 5162 static void check_ioctl_unit_attention(struct ctlr_info *h, 5163 struct CommandList *c) 5164 { 5165 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 5166 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) 5167 (void) check_for_unit_attention(h, c); 5168 } 5169 5170 static int increment_passthru_count(struct ctlr_info *h) 5171 { 5172 unsigned long flags; 5173 5174 spin_lock_irqsave(&h->passthru_count_lock, flags); 5175 if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) { 5176 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 5177 return -1; 5178 } 5179 h->passthru_count++; 5180 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 5181 return 0; 5182 } 5183 5184 static void decrement_passthru_count(struct ctlr_info *h) 5185 { 5186 unsigned long flags; 5187 5188 spin_lock_irqsave(&h->passthru_count_lock, flags); 5189 if (h->passthru_count <= 0) { 5190 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 5191 /* not expecting to get here. */ 5192 dev_warn(&h->pdev->dev, "Bug detected, passthru_count seems to be incorrect.\n"); 5193 return; 5194 } 5195 h->passthru_count--; 5196 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 5197 } 5198 5199 /* 5200 * ioctl 5201 */ 5202 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg) 5203 { 5204 struct ctlr_info *h; 5205 void __user *argp = (void __user *)arg; 5206 int rc; 5207 5208 h = sdev_to_hba(dev); 5209 5210 switch (cmd) { 5211 case CCISS_DEREGDISK: 5212 case CCISS_REGNEWDISK: 5213 case CCISS_REGNEWD: 5214 hpsa_scan_start(h->scsi_host); 5215 return 0; 5216 case CCISS_GETPCIINFO: 5217 return hpsa_getpciinfo_ioctl(h, argp); 5218 case CCISS_GETDRIVVER: 5219 return hpsa_getdrivver_ioctl(h, argp); 5220 case CCISS_PASSTHRU: 5221 if (increment_passthru_count(h)) 5222 return -EAGAIN; 5223 rc = hpsa_passthru_ioctl(h, argp); 5224 decrement_passthru_count(h); 5225 return rc; 5226 case CCISS_BIG_PASSTHRU: 5227 if (increment_passthru_count(h)) 5228 return -EAGAIN; 5229 rc = hpsa_big_passthru_ioctl(h, argp); 5230 decrement_passthru_count(h); 5231 return rc; 5232 default: 5233 return -ENOTTY; 5234 } 5235 } 5236 5237 static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, 5238 u8 reset_type) 5239 { 5240 struct CommandList *c; 5241 5242 c = cmd_alloc(h); 5243 if (!c) 5244 return -ENOMEM; 5245 /* fill_cmd can't fail here, no data buffer to map */ 5246 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, 5247 RAID_CTLR_LUNID, TYPE_MSG); 5248 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */ 5249 c->waiting = NULL; 5250 enqueue_cmd_and_start_io(h, c); 5251 /* Don't wait for completion, the reset won't complete. Don't free 5252 * the command either. This is the last command we will send before 5253 * re-initializing everything, so it doesn't matter and won't leak. 5254 */ 5255 return 0; 5256 } 5257 5258 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, 5259 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, 5260 int cmd_type) 5261 { 5262 int pci_dir = XFER_NONE; 5263 struct CommandList *a; /* for commands to be aborted */ 5264 5265 c->cmd_type = CMD_IOCTL_PEND; 5266 c->Header.ReplyQueue = 0; 5267 if (buff != NULL && size > 0) { 5268 c->Header.SGList = 1; 5269 c->Header.SGTotal = 1; 5270 } else { 5271 c->Header.SGList = 0; 5272 c->Header.SGTotal = 0; 5273 } 5274 c->Header.Tag.lower = c->busaddr; 5275 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); 5276 5277 c->Request.Type.Type = cmd_type; 5278 if (cmd_type == TYPE_CMD) { 5279 switch (cmd) { 5280 case HPSA_INQUIRY: 5281 /* are we trying to read a vital product page */ 5282 if (page_code & VPD_PAGE) { 5283 c->Request.CDB[1] = 0x01; 5284 c->Request.CDB[2] = (page_code & 0xff); 5285 } 5286 c->Request.CDBLen = 6; 5287 c->Request.Type.Attribute = ATTR_SIMPLE; 5288 c->Request.Type.Direction = XFER_READ; 5289 c->Request.Timeout = 0; 5290 c->Request.CDB[0] = HPSA_INQUIRY; 5291 c->Request.CDB[4] = size & 0xFF; 5292 break; 5293 case HPSA_REPORT_LOG: 5294 case HPSA_REPORT_PHYS: 5295 /* Talking to controller so It's a physical command 5296 mode = 00 target = 0. Nothing to write. 5297 */ 5298 c->Request.CDBLen = 12; 5299 c->Request.Type.Attribute = ATTR_SIMPLE; 5300 c->Request.Type.Direction = XFER_READ; 5301 c->Request.Timeout = 0; 5302 c->Request.CDB[0] = cmd; 5303 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ 5304 c->Request.CDB[7] = (size >> 16) & 0xFF; 5305 c->Request.CDB[8] = (size >> 8) & 0xFF; 5306 c->Request.CDB[9] = size & 0xFF; 5307 break; 5308 case HPSA_CACHE_FLUSH: 5309 c->Request.CDBLen = 12; 5310 c->Request.Type.Attribute = ATTR_SIMPLE; 5311 c->Request.Type.Direction = XFER_WRITE; 5312 c->Request.Timeout = 0; 5313 c->Request.CDB[0] = BMIC_WRITE; 5314 c->Request.CDB[6] = BMIC_CACHE_FLUSH; 5315 c->Request.CDB[7] = (size >> 8) & 0xFF; 5316 c->Request.CDB[8] = size & 0xFF; 5317 break; 5318 case TEST_UNIT_READY: 5319 c->Request.CDBLen = 6; 5320 c->Request.Type.Attribute = ATTR_SIMPLE; 5321 c->Request.Type.Direction = XFER_NONE; 5322 c->Request.Timeout = 0; 5323 break; 5324 case HPSA_GET_RAID_MAP: 5325 c->Request.CDBLen = 12; 5326 c->Request.Type.Attribute = ATTR_SIMPLE; 5327 c->Request.Type.Direction = XFER_READ; 5328 c->Request.Timeout = 0; 5329 c->Request.CDB[0] = HPSA_CISS_READ; 5330 c->Request.CDB[1] = cmd; 5331 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ 5332 c->Request.CDB[7] = (size >> 16) & 0xFF; 5333 c->Request.CDB[8] = (size >> 8) & 0xFF; 5334 c->Request.CDB[9] = size & 0xFF; 5335 break; 5336 case BMIC_SENSE_CONTROLLER_PARAMETERS: 5337 c->Request.CDBLen = 10; 5338 c->Request.Type.Attribute = ATTR_SIMPLE; 5339 c->Request.Type.Direction = XFER_READ; 5340 c->Request.Timeout = 0; 5341 c->Request.CDB[0] = BMIC_READ; 5342 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS; 5343 c->Request.CDB[7] = (size >> 16) & 0xFF; 5344 c->Request.CDB[8] = (size >> 8) & 0xFF; 5345 break; 5346 default: 5347 dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd); 5348 BUG(); 5349 return -1; 5350 } 5351 } else if (cmd_type == TYPE_MSG) { 5352 switch (cmd) { 5353 5354 case HPSA_DEVICE_RESET_MSG: 5355 c->Request.CDBLen = 16; 5356 c->Request.Type.Type = 1; /* It is a MSG not a CMD */ 5357 c->Request.Type.Attribute = ATTR_SIMPLE; 5358 c->Request.Type.Direction = XFER_NONE; 5359 c->Request.Timeout = 0; /* Don't time out */ 5360 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); 5361 c->Request.CDB[0] = cmd; 5362 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN; 5363 /* If bytes 4-7 are zero, it means reset the */ 5364 /* LunID device */ 5365 c->Request.CDB[4] = 0x00; 5366 c->Request.CDB[5] = 0x00; 5367 c->Request.CDB[6] = 0x00; 5368 c->Request.CDB[7] = 0x00; 5369 break; 5370 case HPSA_ABORT_MSG: 5371 a = buff; /* point to command to be aborted */ 5372 dev_dbg(&h->pdev->dev, "Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n", 5373 a->Header.Tag.upper, a->Header.Tag.lower, 5374 c->Header.Tag.upper, c->Header.Tag.lower); 5375 c->Request.CDBLen = 16; 5376 c->Request.Type.Type = TYPE_MSG; 5377 c->Request.Type.Attribute = ATTR_SIMPLE; 5378 c->Request.Type.Direction = XFER_WRITE; 5379 c->Request.Timeout = 0; /* Don't time out */ 5380 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT; 5381 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK; 5382 c->Request.CDB[2] = 0x00; /* reserved */ 5383 c->Request.CDB[3] = 0x00; /* reserved */ 5384 /* Tag to abort goes in CDB[4]-CDB[11] */ 5385 c->Request.CDB[4] = a->Header.Tag.lower & 0xFF; 5386 c->Request.CDB[5] = (a->Header.Tag.lower >> 8) & 0xFF; 5387 c->Request.CDB[6] = (a->Header.Tag.lower >> 16) & 0xFF; 5388 c->Request.CDB[7] = (a->Header.Tag.lower >> 24) & 0xFF; 5389 c->Request.CDB[8] = a->Header.Tag.upper & 0xFF; 5390 c->Request.CDB[9] = (a->Header.Tag.upper >> 8) & 0xFF; 5391 c->Request.CDB[10] = (a->Header.Tag.upper >> 16) & 0xFF; 5392 c->Request.CDB[11] = (a->Header.Tag.upper >> 24) & 0xFF; 5393 c->Request.CDB[12] = 0x00; /* reserved */ 5394 c->Request.CDB[13] = 0x00; /* reserved */ 5395 c->Request.CDB[14] = 0x00; /* reserved */ 5396 c->Request.CDB[15] = 0x00; /* reserved */ 5397 break; 5398 default: 5399 dev_warn(&h->pdev->dev, "unknown message type %d\n", 5400 cmd); 5401 BUG(); 5402 } 5403 } else { 5404 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); 5405 BUG(); 5406 } 5407 5408 switch (c->Request.Type.Direction) { 5409 case XFER_READ: 5410 pci_dir = PCI_DMA_FROMDEVICE; 5411 break; 5412 case XFER_WRITE: 5413 pci_dir = PCI_DMA_TODEVICE; 5414 break; 5415 case XFER_NONE: 5416 pci_dir = PCI_DMA_NONE; 5417 break; 5418 default: 5419 pci_dir = PCI_DMA_BIDIRECTIONAL; 5420 } 5421 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir)) 5422 return -1; 5423 return 0; 5424 } 5425 5426 /* 5427 * Map (physical) PCI mem into (virtual) kernel space 5428 */ 5429 static void __iomem *remap_pci_mem(ulong base, ulong size) 5430 { 5431 ulong page_base = ((ulong) base) & PAGE_MASK; 5432 ulong page_offs = ((ulong) base) - page_base; 5433 void __iomem *page_remapped = ioremap_nocache(page_base, 5434 page_offs + size); 5435 5436 return page_remapped ? (page_remapped + page_offs) : NULL; 5437 } 5438 5439 /* Takes cmds off the submission queue and sends them to the hardware, 5440 * then puts them on the queue of cmds waiting for completion. 5441 */ 5442 static void start_io(struct ctlr_info *h) 5443 { 5444 struct CommandList *c; 5445 unsigned long flags; 5446 5447 spin_lock_irqsave(&h->lock, flags); 5448 while (!list_empty(&h->reqQ)) { 5449 c = list_entry(h->reqQ.next, struct CommandList, list); 5450 /* can't do anything if fifo is full */ 5451 if ((h->access.fifo_full(h))) { 5452 h->fifo_recently_full = 1; 5453 dev_warn(&h->pdev->dev, "fifo full\n"); 5454 break; 5455 } 5456 h->fifo_recently_full = 0; 5457 5458 /* Get the first entry from the Request Q */ 5459 removeQ(c); 5460 h->Qdepth--; 5461 5462 /* Put job onto the completed Q */ 5463 addQ(&h->cmpQ, c); 5464 5465 /* Must increment commands_outstanding before unlocking 5466 * and submitting to avoid race checking for fifo full 5467 * condition. 5468 */ 5469 h->commands_outstanding++; 5470 if (h->commands_outstanding > h->max_outstanding) 5471 h->max_outstanding = h->commands_outstanding; 5472 5473 /* Tell the controller execute command */ 5474 spin_unlock_irqrestore(&h->lock, flags); 5475 h->access.submit_command(h, c); 5476 spin_lock_irqsave(&h->lock, flags); 5477 } 5478 spin_unlock_irqrestore(&h->lock, flags); 5479 } 5480 5481 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q) 5482 { 5483 return h->access.command_completed(h, q); 5484 } 5485 5486 static inline bool interrupt_pending(struct ctlr_info *h) 5487 { 5488 return h->access.intr_pending(h); 5489 } 5490 5491 static inline long interrupt_not_for_us(struct ctlr_info *h) 5492 { 5493 return (h->access.intr_pending(h) == 0) || 5494 (h->interrupts_enabled == 0); 5495 } 5496 5497 static inline int bad_tag(struct ctlr_info *h, u32 tag_index, 5498 u32 raw_tag) 5499 { 5500 if (unlikely(tag_index >= h->nr_cmds)) { 5501 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); 5502 return 1; 5503 } 5504 return 0; 5505 } 5506 5507 static inline void finish_cmd(struct CommandList *c) 5508 { 5509 unsigned long flags; 5510 int io_may_be_stalled = 0; 5511 struct ctlr_info *h = c->h; 5512 5513 spin_lock_irqsave(&h->lock, flags); 5514 removeQ(c); 5515 5516 /* 5517 * Check for possibly stalled i/o. 5518 * 5519 * If a fifo_full condition is encountered, requests will back up 5520 * in h->reqQ. This queue is only emptied out by start_io which is 5521 * only called when a new i/o request comes in. If no i/o's are 5522 * forthcoming, the i/o's in h->reqQ can get stuck. So we call 5523 * start_io from here if we detect such a danger. 5524 * 5525 * Normally, we shouldn't hit this case, but pounding on the 5526 * CCISS_PASSTHRU ioctl can provoke it. Only call start_io if 5527 * commands_outstanding is low. We want to avoid calling 5528 * start_io from in here as much as possible, and esp. don't 5529 * want to get in a cycle where we call start_io every time 5530 * through here. 5531 */ 5532 if (unlikely(h->fifo_recently_full) && 5533 h->commands_outstanding < 5) 5534 io_may_be_stalled = 1; 5535 5536 spin_unlock_irqrestore(&h->lock, flags); 5537 5538 dial_up_lockup_detection_on_fw_flash_complete(c->h, c); 5539 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI 5540 || c->cmd_type == CMD_IOACCEL2)) 5541 complete_scsi_command(c); 5542 else if (c->cmd_type == CMD_IOCTL_PEND) 5543 complete(c->waiting); 5544 if (unlikely(io_may_be_stalled)) 5545 start_io(h); 5546 } 5547 5548 static inline u32 hpsa_tag_contains_index(u32 tag) 5549 { 5550 return tag & DIRECT_LOOKUP_BIT; 5551 } 5552 5553 static inline u32 hpsa_tag_to_index(u32 tag) 5554 { 5555 return tag >> DIRECT_LOOKUP_SHIFT; 5556 } 5557 5558 5559 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag) 5560 { 5561 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1) 5562 #define HPSA_SIMPLE_ERROR_BITS 0x03 5563 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) 5564 return tag & ~HPSA_SIMPLE_ERROR_BITS; 5565 return tag & ~HPSA_PERF_ERROR_BITS; 5566 } 5567 5568 /* process completion of an indexed ("direct lookup") command */ 5569 static inline void process_indexed_cmd(struct ctlr_info *h, 5570 u32 raw_tag) 5571 { 5572 u32 tag_index; 5573 struct CommandList *c; 5574 5575 tag_index = hpsa_tag_to_index(raw_tag); 5576 if (!bad_tag(h, tag_index, raw_tag)) { 5577 c = h->cmd_pool + tag_index; 5578 finish_cmd(c); 5579 } 5580 } 5581 5582 /* process completion of a non-indexed command */ 5583 static inline void process_nonindexed_cmd(struct ctlr_info *h, 5584 u32 raw_tag) 5585 { 5586 u32 tag; 5587 struct CommandList *c = NULL; 5588 unsigned long flags; 5589 5590 tag = hpsa_tag_discard_error_bits(h, raw_tag); 5591 spin_lock_irqsave(&h->lock, flags); 5592 list_for_each_entry(c, &h->cmpQ, list) { 5593 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) { 5594 spin_unlock_irqrestore(&h->lock, flags); 5595 finish_cmd(c); 5596 return; 5597 } 5598 } 5599 spin_unlock_irqrestore(&h->lock, flags); 5600 bad_tag(h, h->nr_cmds + 1, raw_tag); 5601 } 5602 5603 /* Some controllers, like p400, will give us one interrupt 5604 * after a soft reset, even if we turned interrupts off. 5605 * Only need to check for this in the hpsa_xxx_discard_completions 5606 * functions. 5607 */ 5608 static int ignore_bogus_interrupt(struct ctlr_info *h) 5609 { 5610 if (likely(!reset_devices)) 5611 return 0; 5612 5613 if (likely(h->interrupts_enabled)) 5614 return 0; 5615 5616 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " 5617 "(known firmware bug.) Ignoring.\n"); 5618 5619 return 1; 5620 } 5621 5622 /* 5623 * Convert &h->q[x] (passed to interrupt handlers) back to h. 5624 * Relies on (h-q[x] == x) being true for x such that 5625 * 0 <= x < MAX_REPLY_QUEUES. 5626 */ 5627 static struct ctlr_info *queue_to_hba(u8 *queue) 5628 { 5629 return container_of((queue - *queue), struct ctlr_info, q[0]); 5630 } 5631 5632 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue) 5633 { 5634 struct ctlr_info *h = queue_to_hba(queue); 5635 u8 q = *(u8 *) queue; 5636 u32 raw_tag; 5637 5638 if (ignore_bogus_interrupt(h)) 5639 return IRQ_NONE; 5640 5641 if (interrupt_not_for_us(h)) 5642 return IRQ_NONE; 5643 h->last_intr_timestamp = get_jiffies_64(); 5644 while (interrupt_pending(h)) { 5645 raw_tag = get_next_completion(h, q); 5646 while (raw_tag != FIFO_EMPTY) 5647 raw_tag = next_command(h, q); 5648 } 5649 return IRQ_HANDLED; 5650 } 5651 5652 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue) 5653 { 5654 struct ctlr_info *h = queue_to_hba(queue); 5655 u32 raw_tag; 5656 u8 q = *(u8 *) queue; 5657 5658 if (ignore_bogus_interrupt(h)) 5659 return IRQ_NONE; 5660 5661 h->last_intr_timestamp = get_jiffies_64(); 5662 raw_tag = get_next_completion(h, q); 5663 while (raw_tag != FIFO_EMPTY) 5664 raw_tag = next_command(h, q); 5665 return IRQ_HANDLED; 5666 } 5667 5668 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue) 5669 { 5670 struct ctlr_info *h = queue_to_hba((u8 *) queue); 5671 u32 raw_tag; 5672 u8 q = *(u8 *) queue; 5673 5674 if (interrupt_not_for_us(h)) 5675 return IRQ_NONE; 5676 h->last_intr_timestamp = get_jiffies_64(); 5677 while (interrupt_pending(h)) { 5678 raw_tag = get_next_completion(h, q); 5679 while (raw_tag != FIFO_EMPTY) { 5680 if (likely(hpsa_tag_contains_index(raw_tag))) 5681 process_indexed_cmd(h, raw_tag); 5682 else 5683 process_nonindexed_cmd(h, raw_tag); 5684 raw_tag = next_command(h, q); 5685 } 5686 } 5687 return IRQ_HANDLED; 5688 } 5689 5690 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue) 5691 { 5692 struct ctlr_info *h = queue_to_hba(queue); 5693 u32 raw_tag; 5694 u8 q = *(u8 *) queue; 5695 5696 h->last_intr_timestamp = get_jiffies_64(); 5697 raw_tag = get_next_completion(h, q); 5698 while (raw_tag != FIFO_EMPTY) { 5699 if (likely(hpsa_tag_contains_index(raw_tag))) 5700 process_indexed_cmd(h, raw_tag); 5701 else 5702 process_nonindexed_cmd(h, raw_tag); 5703 raw_tag = next_command(h, q); 5704 } 5705 return IRQ_HANDLED; 5706 } 5707 5708 /* Send a message CDB to the firmware. Careful, this only works 5709 * in simple mode, not performant mode due to the tag lookup. 5710 * We only ever use this immediately after a controller reset. 5711 */ 5712 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, 5713 unsigned char type) 5714 { 5715 struct Command { 5716 struct CommandListHeader CommandHeader; 5717 struct RequestBlock Request; 5718 struct ErrDescriptor ErrorDescriptor; 5719 }; 5720 struct Command *cmd; 5721 static const size_t cmd_sz = sizeof(*cmd) + 5722 sizeof(cmd->ErrorDescriptor); 5723 dma_addr_t paddr64; 5724 uint32_t paddr32, tag; 5725 void __iomem *vaddr; 5726 int i, err; 5727 5728 vaddr = pci_ioremap_bar(pdev, 0); 5729 if (vaddr == NULL) 5730 return -ENOMEM; 5731 5732 /* The Inbound Post Queue only accepts 32-bit physical addresses for the 5733 * CCISS commands, so they must be allocated from the lower 4GiB of 5734 * memory. 5735 */ 5736 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 5737 if (err) { 5738 iounmap(vaddr); 5739 return -ENOMEM; 5740 } 5741 5742 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); 5743 if (cmd == NULL) { 5744 iounmap(vaddr); 5745 return -ENOMEM; 5746 } 5747 5748 /* This must fit, because of the 32-bit consistent DMA mask. Also, 5749 * although there's no guarantee, we assume that the address is at 5750 * least 4-byte aligned (most likely, it's page-aligned). 5751 */ 5752 paddr32 = paddr64; 5753 5754 cmd->CommandHeader.ReplyQueue = 0; 5755 cmd->CommandHeader.SGList = 0; 5756 cmd->CommandHeader.SGTotal = 0; 5757 cmd->CommandHeader.Tag.lower = paddr32; 5758 cmd->CommandHeader.Tag.upper = 0; 5759 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); 5760 5761 cmd->Request.CDBLen = 16; 5762 cmd->Request.Type.Type = TYPE_MSG; 5763 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE; 5764 cmd->Request.Type.Direction = XFER_NONE; 5765 cmd->Request.Timeout = 0; /* Don't time out */ 5766 cmd->Request.CDB[0] = opcode; 5767 cmd->Request.CDB[1] = type; 5768 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ 5769 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd); 5770 cmd->ErrorDescriptor.Addr.upper = 0; 5771 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo); 5772 5773 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET); 5774 5775 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { 5776 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); 5777 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32) 5778 break; 5779 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); 5780 } 5781 5782 iounmap(vaddr); 5783 5784 /* we leak the DMA buffer here ... no choice since the controller could 5785 * still complete the command. 5786 */ 5787 if (i == HPSA_MSG_SEND_RETRY_LIMIT) { 5788 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n", 5789 opcode, type); 5790 return -ETIMEDOUT; 5791 } 5792 5793 pci_free_consistent(pdev, cmd_sz, cmd, paddr64); 5794 5795 if (tag & HPSA_ERROR_BIT) { 5796 dev_err(&pdev->dev, "controller message %02x:%02x failed\n", 5797 opcode, type); 5798 return -EIO; 5799 } 5800 5801 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", 5802 opcode, type); 5803 return 0; 5804 } 5805 5806 #define hpsa_noop(p) hpsa_message(p, 3, 0) 5807 5808 static int hpsa_controller_hard_reset(struct pci_dev *pdev, 5809 void * __iomem vaddr, u32 use_doorbell) 5810 { 5811 u16 pmcsr; 5812 int pos; 5813 5814 if (use_doorbell) { 5815 /* For everything after the P600, the PCI power state method 5816 * of resetting the controller doesn't work, so we have this 5817 * other way using the doorbell register. 5818 */ 5819 dev_info(&pdev->dev, "using doorbell to reset controller\n"); 5820 writel(use_doorbell, vaddr + SA5_DOORBELL); 5821 5822 /* PMC hardware guys tell us we need a 5 second delay after 5823 * doorbell reset and before any attempt to talk to the board 5824 * at all to ensure that this actually works and doesn't fall 5825 * over in some weird corner cases. 5826 */ 5827 msleep(5000); 5828 } else { /* Try to do it the PCI power state way */ 5829 5830 /* Quoting from the Open CISS Specification: "The Power 5831 * Management Control/Status Register (CSR) controls the power 5832 * state of the device. The normal operating state is D0, 5833 * CSR=00h. The software off state is D3, CSR=03h. To reset 5834 * the controller, place the interface device in D3 then to D0, 5835 * this causes a secondary PCI reset which will reset the 5836 * controller." */ 5837 5838 pos = pci_find_capability(pdev, PCI_CAP_ID_PM); 5839 if (pos == 0) { 5840 dev_err(&pdev->dev, 5841 "hpsa_reset_controller: " 5842 "PCI PM not supported\n"); 5843 return -ENODEV; 5844 } 5845 dev_info(&pdev->dev, "using PCI PM to reset controller\n"); 5846 /* enter the D3hot power management state */ 5847 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); 5848 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 5849 pmcsr |= PCI_D3hot; 5850 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 5851 5852 msleep(500); 5853 5854 /* enter the D0 power management state */ 5855 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 5856 pmcsr |= PCI_D0; 5857 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 5858 5859 /* 5860 * The P600 requires a small delay when changing states. 5861 * Otherwise we may think the board did not reset and we bail. 5862 * This for kdump only and is particular to the P600. 5863 */ 5864 msleep(500); 5865 } 5866 return 0; 5867 } 5868 5869 static void init_driver_version(char *driver_version, int len) 5870 { 5871 memset(driver_version, 0, len); 5872 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1); 5873 } 5874 5875 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable) 5876 { 5877 char *driver_version; 5878 int i, size = sizeof(cfgtable->driver_version); 5879 5880 driver_version = kmalloc(size, GFP_KERNEL); 5881 if (!driver_version) 5882 return -ENOMEM; 5883 5884 init_driver_version(driver_version, size); 5885 for (i = 0; i < size; i++) 5886 writeb(driver_version[i], &cfgtable->driver_version[i]); 5887 kfree(driver_version); 5888 return 0; 5889 } 5890 5891 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable, 5892 unsigned char *driver_ver) 5893 { 5894 int i; 5895 5896 for (i = 0; i < sizeof(cfgtable->driver_version); i++) 5897 driver_ver[i] = readb(&cfgtable->driver_version[i]); 5898 } 5899 5900 static int controller_reset_failed(struct CfgTable __iomem *cfgtable) 5901 { 5902 5903 char *driver_ver, *old_driver_ver; 5904 int rc, size = sizeof(cfgtable->driver_version); 5905 5906 old_driver_ver = kmalloc(2 * size, GFP_KERNEL); 5907 if (!old_driver_ver) 5908 return -ENOMEM; 5909 driver_ver = old_driver_ver + size; 5910 5911 /* After a reset, the 32 bytes of "driver version" in the cfgtable 5912 * should have been changed, otherwise we know the reset failed. 5913 */ 5914 init_driver_version(old_driver_ver, size); 5915 read_driver_ver_from_cfgtable(cfgtable, driver_ver); 5916 rc = !memcmp(driver_ver, old_driver_ver, size); 5917 kfree(old_driver_ver); 5918 return rc; 5919 } 5920 /* This does a hard reset of the controller using PCI power management 5921 * states or the using the doorbell register. 5922 */ 5923 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) 5924 { 5925 u64 cfg_offset; 5926 u32 cfg_base_addr; 5927 u64 cfg_base_addr_index; 5928 void __iomem *vaddr; 5929 unsigned long paddr; 5930 u32 misc_fw_support; 5931 int rc; 5932 struct CfgTable __iomem *cfgtable; 5933 u32 use_doorbell; 5934 u32 board_id; 5935 u16 command_register; 5936 5937 /* For controllers as old as the P600, this is very nearly 5938 * the same thing as 5939 * 5940 * pci_save_state(pci_dev); 5941 * pci_set_power_state(pci_dev, PCI_D3hot); 5942 * pci_set_power_state(pci_dev, PCI_D0); 5943 * pci_restore_state(pci_dev); 5944 * 5945 * For controllers newer than the P600, the pci power state 5946 * method of resetting doesn't work so we have another way 5947 * using the doorbell register. 5948 */ 5949 5950 rc = hpsa_lookup_board_id(pdev, &board_id); 5951 if (rc < 0 || !ctlr_is_resettable(board_id)) { 5952 dev_warn(&pdev->dev, "Not resetting device.\n"); 5953 return -ENODEV; 5954 } 5955 5956 /* if controller is soft- but not hard resettable... */ 5957 if (!ctlr_is_hard_resettable(board_id)) 5958 return -ENOTSUPP; /* try soft reset later. */ 5959 5960 /* Save the PCI command register */ 5961 pci_read_config_word(pdev, 4, &command_register); 5962 /* Turn the board off. This is so that later pci_restore_state() 5963 * won't turn the board on before the rest of config space is ready. 5964 */ 5965 pci_disable_device(pdev); 5966 pci_save_state(pdev); 5967 5968 /* find the first memory BAR, so we can find the cfg table */ 5969 rc = hpsa_pci_find_memory_BAR(pdev, &paddr); 5970 if (rc) 5971 return rc; 5972 vaddr = remap_pci_mem(paddr, 0x250); 5973 if (!vaddr) 5974 return -ENOMEM; 5975 5976 /* find cfgtable in order to check if reset via doorbell is supported */ 5977 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, 5978 &cfg_base_addr_index, &cfg_offset); 5979 if (rc) 5980 goto unmap_vaddr; 5981 cfgtable = remap_pci_mem(pci_resource_start(pdev, 5982 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable)); 5983 if (!cfgtable) { 5984 rc = -ENOMEM; 5985 goto unmap_vaddr; 5986 } 5987 rc = write_driver_ver_to_cfgtable(cfgtable); 5988 if (rc) 5989 goto unmap_vaddr; 5990 5991 /* If reset via doorbell register is supported, use that. 5992 * There are two such methods. Favor the newest method. 5993 */ 5994 misc_fw_support = readl(&cfgtable->misc_fw_support); 5995 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2; 5996 if (use_doorbell) { 5997 use_doorbell = DOORBELL_CTLR_RESET2; 5998 } else { 5999 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; 6000 if (use_doorbell) { 6001 dev_warn(&pdev->dev, "Soft reset not supported. " 6002 "Firmware update is required.\n"); 6003 rc = -ENOTSUPP; /* try soft reset */ 6004 goto unmap_cfgtable; 6005 } 6006 } 6007 6008 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); 6009 if (rc) 6010 goto unmap_cfgtable; 6011 6012 pci_restore_state(pdev); 6013 rc = pci_enable_device(pdev); 6014 if (rc) { 6015 dev_warn(&pdev->dev, "failed to enable device.\n"); 6016 goto unmap_cfgtable; 6017 } 6018 pci_write_config_word(pdev, 4, command_register); 6019 6020 /* Some devices (notably the HP Smart Array 5i Controller) 6021 need a little pause here */ 6022 msleep(HPSA_POST_RESET_PAUSE_MSECS); 6023 6024 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); 6025 if (rc) { 6026 dev_warn(&pdev->dev, 6027 "failed waiting for board to become ready " 6028 "after hard reset\n"); 6029 goto unmap_cfgtable; 6030 } 6031 6032 rc = controller_reset_failed(vaddr); 6033 if (rc < 0) 6034 goto unmap_cfgtable; 6035 if (rc) { 6036 dev_warn(&pdev->dev, "Unable to successfully reset " 6037 "controller. Will try soft reset.\n"); 6038 rc = -ENOTSUPP; 6039 } else { 6040 dev_info(&pdev->dev, "board ready after hard reset.\n"); 6041 } 6042 6043 unmap_cfgtable: 6044 iounmap(cfgtable); 6045 6046 unmap_vaddr: 6047 iounmap(vaddr); 6048 return rc; 6049 } 6050 6051 /* 6052 * We cannot read the structure directly, for portability we must use 6053 * the io functions. 6054 * This is for debug only. 6055 */ 6056 static void print_cfg_table(struct device *dev, struct CfgTable *tb) 6057 { 6058 #ifdef HPSA_DEBUG 6059 int i; 6060 char temp_name[17]; 6061 6062 dev_info(dev, "Controller Configuration information\n"); 6063 dev_info(dev, "------------------------------------\n"); 6064 for (i = 0; i < 4; i++) 6065 temp_name[i] = readb(&(tb->Signature[i])); 6066 temp_name[4] = '\0'; 6067 dev_info(dev, " Signature = %s\n", temp_name); 6068 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence))); 6069 dev_info(dev, " Transport methods supported = 0x%x\n", 6070 readl(&(tb->TransportSupport))); 6071 dev_info(dev, " Transport methods active = 0x%x\n", 6072 readl(&(tb->TransportActive))); 6073 dev_info(dev, " Requested transport Method = 0x%x\n", 6074 readl(&(tb->HostWrite.TransportRequest))); 6075 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n", 6076 readl(&(tb->HostWrite.CoalIntDelay))); 6077 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n", 6078 readl(&(tb->HostWrite.CoalIntCount))); 6079 dev_info(dev, " Max outstanding commands = 0x%d\n", 6080 readl(&(tb->CmdsOutMax))); 6081 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); 6082 for (i = 0; i < 16; i++) 6083 temp_name[i] = readb(&(tb->ServerName[i])); 6084 temp_name[16] = '\0'; 6085 dev_info(dev, " Server Name = %s\n", temp_name); 6086 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n", 6087 readl(&(tb->HeartBeat))); 6088 #endif /* HPSA_DEBUG */ 6089 } 6090 6091 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) 6092 { 6093 int i, offset, mem_type, bar_type; 6094 6095 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */ 6096 return 0; 6097 offset = 0; 6098 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 6099 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE; 6100 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO) 6101 offset += 4; 6102 else { 6103 mem_type = pci_resource_flags(pdev, i) & 6104 PCI_BASE_ADDRESS_MEM_TYPE_MASK; 6105 switch (mem_type) { 6106 case PCI_BASE_ADDRESS_MEM_TYPE_32: 6107 case PCI_BASE_ADDRESS_MEM_TYPE_1M: 6108 offset += 4; /* 32 bit */ 6109 break; 6110 case PCI_BASE_ADDRESS_MEM_TYPE_64: 6111 offset += 8; 6112 break; 6113 default: /* reserved in PCI 2.2 */ 6114 dev_warn(&pdev->dev, 6115 "base address is invalid\n"); 6116 return -1; 6117 break; 6118 } 6119 } 6120 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) 6121 return i + 1; 6122 } 6123 return -1; 6124 } 6125 6126 /* If MSI/MSI-X is supported by the kernel we will try to enable it on 6127 * controllers that are capable. If not, we use IO-APIC mode. 6128 */ 6129 6130 static void hpsa_interrupt_mode(struct ctlr_info *h) 6131 { 6132 #ifdef CONFIG_PCI_MSI 6133 int err, i; 6134 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES]; 6135 6136 for (i = 0; i < MAX_REPLY_QUEUES; i++) { 6137 hpsa_msix_entries[i].vector = 0; 6138 hpsa_msix_entries[i].entry = i; 6139 } 6140 6141 /* Some boards advertise MSI but don't really support it */ 6142 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || 6143 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) 6144 goto default_int_mode; 6145 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { 6146 dev_info(&h->pdev->dev, "MSIX\n"); 6147 h->msix_vector = MAX_REPLY_QUEUES; 6148 err = pci_enable_msix(h->pdev, hpsa_msix_entries, 6149 h->msix_vector); 6150 if (err > 0) { 6151 dev_warn(&h->pdev->dev, "only %d MSI-X vectors " 6152 "available\n", err); 6153 h->msix_vector = err; 6154 err = pci_enable_msix(h->pdev, hpsa_msix_entries, 6155 h->msix_vector); 6156 } 6157 if (!err) { 6158 for (i = 0; i < h->msix_vector; i++) 6159 h->intr[i] = hpsa_msix_entries[i].vector; 6160 return; 6161 } else { 6162 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", 6163 err); 6164 h->msix_vector = 0; 6165 goto default_int_mode; 6166 } 6167 } 6168 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { 6169 dev_info(&h->pdev->dev, "MSI\n"); 6170 if (!pci_enable_msi(h->pdev)) 6171 h->msi_vector = 1; 6172 else 6173 dev_warn(&h->pdev->dev, "MSI init failed\n"); 6174 } 6175 default_int_mode: 6176 #endif /* CONFIG_PCI_MSI */ 6177 /* if we get here we're going to use the default interrupt mode */ 6178 h->intr[h->intr_mode] = h->pdev->irq; 6179 } 6180 6181 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) 6182 { 6183 int i; 6184 u32 subsystem_vendor_id, subsystem_device_id; 6185 6186 subsystem_vendor_id = pdev->subsystem_vendor; 6187 subsystem_device_id = pdev->subsystem_device; 6188 *board_id = ((subsystem_device_id << 16) & 0xffff0000) | 6189 subsystem_vendor_id; 6190 6191 for (i = 0; i < ARRAY_SIZE(products); i++) 6192 if (*board_id == products[i].board_id) 6193 return i; 6194 6195 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP && 6196 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) || 6197 !hpsa_allow_any) { 6198 dev_warn(&pdev->dev, "unrecognized board ID: " 6199 "0x%08x, ignoring.\n", *board_id); 6200 return -ENODEV; 6201 } 6202 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */ 6203 } 6204 6205 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev, 6206 unsigned long *memory_bar) 6207 { 6208 int i; 6209 6210 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) 6211 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 6212 /* addressing mode bits already removed */ 6213 *memory_bar = pci_resource_start(pdev, i); 6214 dev_dbg(&pdev->dev, "memory BAR = %lx\n", 6215 *memory_bar); 6216 return 0; 6217 } 6218 dev_warn(&pdev->dev, "no memory BAR found\n"); 6219 return -ENODEV; 6220 } 6221 6222 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, 6223 int wait_for_ready) 6224 { 6225 int i, iterations; 6226 u32 scratchpad; 6227 if (wait_for_ready) 6228 iterations = HPSA_BOARD_READY_ITERATIONS; 6229 else 6230 iterations = HPSA_BOARD_NOT_READY_ITERATIONS; 6231 6232 for (i = 0; i < iterations; i++) { 6233 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); 6234 if (wait_for_ready) { 6235 if (scratchpad == HPSA_FIRMWARE_READY) 6236 return 0; 6237 } else { 6238 if (scratchpad != HPSA_FIRMWARE_READY) 6239 return 0; 6240 } 6241 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS); 6242 } 6243 dev_warn(&pdev->dev, "board not ready, timed out.\n"); 6244 return -ENODEV; 6245 } 6246 6247 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, 6248 u32 *cfg_base_addr, u64 *cfg_base_addr_index, 6249 u64 *cfg_offset) 6250 { 6251 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET); 6252 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET); 6253 *cfg_base_addr &= (u32) 0x0000ffff; 6254 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr); 6255 if (*cfg_base_addr_index == -1) { 6256 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n"); 6257 return -ENODEV; 6258 } 6259 return 0; 6260 } 6261 6262 static int hpsa_find_cfgtables(struct ctlr_info *h) 6263 { 6264 u64 cfg_offset; 6265 u32 cfg_base_addr; 6266 u64 cfg_base_addr_index; 6267 u32 trans_offset; 6268 int rc; 6269 6270 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, 6271 &cfg_base_addr_index, &cfg_offset); 6272 if (rc) 6273 return rc; 6274 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, 6275 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); 6276 if (!h->cfgtable) 6277 return -ENOMEM; 6278 rc = write_driver_ver_to_cfgtable(h->cfgtable); 6279 if (rc) 6280 return rc; 6281 /* Find performant mode table. */ 6282 trans_offset = readl(&h->cfgtable->TransMethodOffset); 6283 h->transtable = remap_pci_mem(pci_resource_start(h->pdev, 6284 cfg_base_addr_index)+cfg_offset+trans_offset, 6285 sizeof(*h->transtable)); 6286 if (!h->transtable) 6287 return -ENOMEM; 6288 return 0; 6289 } 6290 6291 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) 6292 { 6293 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); 6294 6295 /* Limit commands in memory limited kdump scenario. */ 6296 if (reset_devices && h->max_commands > 32) 6297 h->max_commands = 32; 6298 6299 if (h->max_commands < 16) { 6300 dev_warn(&h->pdev->dev, "Controller reports " 6301 "max supported commands of %d, an obvious lie. " 6302 "Using 16. Ensure that firmware is up to date.\n", 6303 h->max_commands); 6304 h->max_commands = 16; 6305 } 6306 } 6307 6308 /* Interrogate the hardware for some limits: 6309 * max commands, max SG elements without chaining, and with chaining, 6310 * SG chain block size, etc. 6311 */ 6312 static void hpsa_find_board_params(struct ctlr_info *h) 6313 { 6314 hpsa_get_max_perf_mode_cmds(h); 6315 h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */ 6316 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); 6317 h->fw_support = readl(&(h->cfgtable->misc_fw_support)); 6318 /* 6319 * Limit in-command s/g elements to 32 save dma'able memory. 6320 * Howvever spec says if 0, use 31 6321 */ 6322 h->max_cmd_sg_entries = 31; 6323 if (h->maxsgentries > 512) { 6324 h->max_cmd_sg_entries = 32; 6325 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1; 6326 h->maxsgentries--; /* save one for chain pointer */ 6327 } else { 6328 h->maxsgentries = 31; /* default to traditional values */ 6329 h->chainsize = 0; 6330 } 6331 6332 /* Find out what task management functions are supported and cache */ 6333 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags)); 6334 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags)) 6335 dev_warn(&h->pdev->dev, "Physical aborts not supported\n"); 6336 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) 6337 dev_warn(&h->pdev->dev, "Logical aborts not supported\n"); 6338 } 6339 6340 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) 6341 { 6342 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) { 6343 dev_warn(&h->pdev->dev, "not a valid CISS config table\n"); 6344 return false; 6345 } 6346 return true; 6347 } 6348 6349 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h) 6350 { 6351 u32 driver_support; 6352 6353 #ifdef CONFIG_X86 6354 /* Need to enable prefetch in the SCSI core for 6400 in x86 */ 6355 driver_support = readl(&(h->cfgtable->driver_support)); 6356 driver_support |= ENABLE_SCSI_PREFETCH; 6357 #endif 6358 driver_support |= ENABLE_UNIT_ATTN; 6359 writel(driver_support, &(h->cfgtable->driver_support)); 6360 } 6361 6362 /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result 6363 * in a prefetch beyond physical memory. 6364 */ 6365 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h) 6366 { 6367 u32 dma_prefetch; 6368 6369 if (h->board_id != 0x3225103C) 6370 return; 6371 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); 6372 dma_prefetch |= 0x8000; 6373 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); 6374 } 6375 6376 static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h) 6377 { 6378 int i; 6379 u32 doorbell_value; 6380 unsigned long flags; 6381 /* wait until the clear_event_notify bit 6 is cleared by controller. */ 6382 for (i = 0; i < MAX_CONFIG_WAIT; i++) { 6383 spin_lock_irqsave(&h->lock, flags); 6384 doorbell_value = readl(h->vaddr + SA5_DOORBELL); 6385 spin_unlock_irqrestore(&h->lock, flags); 6386 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS)) 6387 break; 6388 /* delay and try again */ 6389 msleep(20); 6390 } 6391 } 6392 6393 static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h) 6394 { 6395 int i; 6396 u32 doorbell_value; 6397 unsigned long flags; 6398 6399 /* under certain very rare conditions, this can take awhile. 6400 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right 6401 * as we enter this code.) 6402 */ 6403 for (i = 0; i < MAX_CONFIG_WAIT; i++) { 6404 spin_lock_irqsave(&h->lock, flags); 6405 doorbell_value = readl(h->vaddr + SA5_DOORBELL); 6406 spin_unlock_irqrestore(&h->lock, flags); 6407 if (!(doorbell_value & CFGTBL_ChangeReq)) 6408 break; 6409 /* delay and try again */ 6410 usleep_range(10000, 20000); 6411 } 6412 } 6413 6414 static int hpsa_enter_simple_mode(struct ctlr_info *h) 6415 { 6416 u32 trans_support; 6417 6418 trans_support = readl(&(h->cfgtable->TransportSupport)); 6419 if (!(trans_support & SIMPLE_MODE)) 6420 return -ENOTSUPP; 6421 6422 h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); 6423 6424 /* Update the field, and then ring the doorbell */ 6425 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); 6426 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); 6427 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 6428 hpsa_wait_for_mode_change_ack(h); 6429 print_cfg_table(&h->pdev->dev, h->cfgtable); 6430 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) 6431 goto error; 6432 h->transMethod = CFGTBL_Trans_Simple; 6433 return 0; 6434 error: 6435 dev_warn(&h->pdev->dev, "unable to get board into simple mode\n"); 6436 return -ENODEV; 6437 } 6438 6439 static int hpsa_pci_init(struct ctlr_info *h) 6440 { 6441 int prod_index, err; 6442 6443 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); 6444 if (prod_index < 0) 6445 return -ENODEV; 6446 h->product_name = products[prod_index].product_name; 6447 h->access = *(products[prod_index].access); 6448 6449 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | 6450 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); 6451 6452 err = pci_enable_device(h->pdev); 6453 if (err) { 6454 dev_warn(&h->pdev->dev, "unable to enable PCI device\n"); 6455 return err; 6456 } 6457 6458 /* Enable bus mastering (pci_disable_device may disable this) */ 6459 pci_set_master(h->pdev); 6460 6461 err = pci_request_regions(h->pdev, HPSA); 6462 if (err) { 6463 dev_err(&h->pdev->dev, 6464 "cannot obtain PCI resources, aborting\n"); 6465 return err; 6466 } 6467 hpsa_interrupt_mode(h); 6468 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); 6469 if (err) 6470 goto err_out_free_res; 6471 h->vaddr = remap_pci_mem(h->paddr, 0x250); 6472 if (!h->vaddr) { 6473 err = -ENOMEM; 6474 goto err_out_free_res; 6475 } 6476 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); 6477 if (err) 6478 goto err_out_free_res; 6479 err = hpsa_find_cfgtables(h); 6480 if (err) 6481 goto err_out_free_res; 6482 hpsa_find_board_params(h); 6483 6484 if (!hpsa_CISS_signature_present(h)) { 6485 err = -ENODEV; 6486 goto err_out_free_res; 6487 } 6488 hpsa_set_driver_support_bits(h); 6489 hpsa_p600_dma_prefetch_quirk(h); 6490 err = hpsa_enter_simple_mode(h); 6491 if (err) 6492 goto err_out_free_res; 6493 return 0; 6494 6495 err_out_free_res: 6496 if (h->transtable) 6497 iounmap(h->transtable); 6498 if (h->cfgtable) 6499 iounmap(h->cfgtable); 6500 if (h->vaddr) 6501 iounmap(h->vaddr); 6502 pci_disable_device(h->pdev); 6503 pci_release_regions(h->pdev); 6504 return err; 6505 } 6506 6507 static void hpsa_hba_inquiry(struct ctlr_info *h) 6508 { 6509 int rc; 6510 6511 #define HBA_INQUIRY_BYTE_COUNT 64 6512 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL); 6513 if (!h->hba_inquiry_data) 6514 return; 6515 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0, 6516 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT); 6517 if (rc != 0) { 6518 kfree(h->hba_inquiry_data); 6519 h->hba_inquiry_data = NULL; 6520 } 6521 } 6522 6523 static int hpsa_init_reset_devices(struct pci_dev *pdev) 6524 { 6525 int rc, i; 6526 6527 if (!reset_devices) 6528 return 0; 6529 6530 /* Reset the controller with a PCI power-cycle or via doorbell */ 6531 rc = hpsa_kdump_hard_reset_controller(pdev); 6532 6533 /* -ENOTSUPP here means we cannot reset the controller 6534 * but it's already (and still) up and running in 6535 * "performant mode". Or, it might be 640x, which can't reset 6536 * due to concerns about shared bbwc between 6402/6404 pair. 6537 */ 6538 if (rc == -ENOTSUPP) 6539 return rc; /* just try to do the kdump anyhow. */ 6540 if (rc) 6541 return -ENODEV; 6542 6543 /* Now try to get the controller to respond to a no-op */ 6544 dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n"); 6545 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { 6546 if (hpsa_noop(pdev) == 0) 6547 break; 6548 else 6549 dev_warn(&pdev->dev, "no-op failed%s\n", 6550 (i < 11 ? "; re-trying" : "")); 6551 } 6552 return 0; 6553 } 6554 6555 static int hpsa_allocate_cmd_pool(struct ctlr_info *h) 6556 { 6557 h->cmd_pool_bits = kzalloc( 6558 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) * 6559 sizeof(unsigned long), GFP_KERNEL); 6560 h->cmd_pool = pci_alloc_consistent(h->pdev, 6561 h->nr_cmds * sizeof(*h->cmd_pool), 6562 &(h->cmd_pool_dhandle)); 6563 h->errinfo_pool = pci_alloc_consistent(h->pdev, 6564 h->nr_cmds * sizeof(*h->errinfo_pool), 6565 &(h->errinfo_pool_dhandle)); 6566 if ((h->cmd_pool_bits == NULL) 6567 || (h->cmd_pool == NULL) 6568 || (h->errinfo_pool == NULL)) { 6569 dev_err(&h->pdev->dev, "out of memory in %s", __func__); 6570 return -ENOMEM; 6571 } 6572 return 0; 6573 } 6574 6575 static void hpsa_free_cmd_pool(struct ctlr_info *h) 6576 { 6577 kfree(h->cmd_pool_bits); 6578 if (h->cmd_pool) 6579 pci_free_consistent(h->pdev, 6580 h->nr_cmds * sizeof(struct CommandList), 6581 h->cmd_pool, h->cmd_pool_dhandle); 6582 if (h->ioaccel2_cmd_pool) 6583 pci_free_consistent(h->pdev, 6584 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), 6585 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle); 6586 if (h->errinfo_pool) 6587 pci_free_consistent(h->pdev, 6588 h->nr_cmds * sizeof(struct ErrorInfo), 6589 h->errinfo_pool, 6590 h->errinfo_pool_dhandle); 6591 if (h->ioaccel_cmd_pool) 6592 pci_free_consistent(h->pdev, 6593 h->nr_cmds * sizeof(struct io_accel1_cmd), 6594 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); 6595 } 6596 6597 static int hpsa_request_irq(struct ctlr_info *h, 6598 irqreturn_t (*msixhandler)(int, void *), 6599 irqreturn_t (*intxhandler)(int, void *)) 6600 { 6601 int rc, i; 6602 6603 /* 6604 * initialize h->q[x] = x so that interrupt handlers know which 6605 * queue to process. 6606 */ 6607 for (i = 0; i < MAX_REPLY_QUEUES; i++) 6608 h->q[i] = (u8) i; 6609 6610 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) { 6611 /* If performant mode and MSI-X, use multiple reply queues */ 6612 for (i = 0; i < h->msix_vector; i++) 6613 rc = request_irq(h->intr[i], msixhandler, 6614 0, h->devname, 6615 &h->q[i]); 6616 } else { 6617 /* Use single reply pool */ 6618 if (h->msix_vector > 0 || h->msi_vector) { 6619 rc = request_irq(h->intr[h->intr_mode], 6620 msixhandler, 0, h->devname, 6621 &h->q[h->intr_mode]); 6622 } else { 6623 rc = request_irq(h->intr[h->intr_mode], 6624 intxhandler, IRQF_SHARED, h->devname, 6625 &h->q[h->intr_mode]); 6626 } 6627 } 6628 if (rc) { 6629 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n", 6630 h->intr[h->intr_mode], h->devname); 6631 return -ENODEV; 6632 } 6633 return 0; 6634 } 6635 6636 static int hpsa_kdump_soft_reset(struct ctlr_info *h) 6637 { 6638 if (hpsa_send_host_reset(h, RAID_CTLR_LUNID, 6639 HPSA_RESET_TYPE_CONTROLLER)) { 6640 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n"); 6641 return -EIO; 6642 } 6643 6644 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); 6645 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) { 6646 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); 6647 return -1; 6648 } 6649 6650 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); 6651 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) { 6652 dev_warn(&h->pdev->dev, "Board failed to become ready " 6653 "after soft reset.\n"); 6654 return -1; 6655 } 6656 6657 return 0; 6658 } 6659 6660 static void free_irqs(struct ctlr_info *h) 6661 { 6662 int i; 6663 6664 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) { 6665 /* Single reply queue, only one irq to free */ 6666 i = h->intr_mode; 6667 free_irq(h->intr[i], &h->q[i]); 6668 return; 6669 } 6670 6671 for (i = 0; i < h->msix_vector; i++) 6672 free_irq(h->intr[i], &h->q[i]); 6673 } 6674 6675 static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h) 6676 { 6677 free_irqs(h); 6678 #ifdef CONFIG_PCI_MSI 6679 if (h->msix_vector) { 6680 if (h->pdev->msix_enabled) 6681 pci_disable_msix(h->pdev); 6682 } else if (h->msi_vector) { 6683 if (h->pdev->msi_enabled) 6684 pci_disable_msi(h->pdev); 6685 } 6686 #endif /* CONFIG_PCI_MSI */ 6687 } 6688 6689 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) 6690 { 6691 hpsa_free_irqs_and_disable_msix(h); 6692 hpsa_free_sg_chain_blocks(h); 6693 hpsa_free_cmd_pool(h); 6694 kfree(h->ioaccel1_blockFetchTable); 6695 kfree(h->blockFetchTable); 6696 pci_free_consistent(h->pdev, h->reply_pool_size, 6697 h->reply_pool, h->reply_pool_dhandle); 6698 if (h->vaddr) 6699 iounmap(h->vaddr); 6700 if (h->transtable) 6701 iounmap(h->transtable); 6702 if (h->cfgtable) 6703 iounmap(h->cfgtable); 6704 pci_release_regions(h->pdev); 6705 kfree(h); 6706 } 6707 6708 /* Called when controller lockup detected. */ 6709 static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list) 6710 { 6711 struct CommandList *c = NULL; 6712 6713 assert_spin_locked(&h->lock); 6714 /* Mark all outstanding commands as failed and complete them. */ 6715 while (!list_empty(list)) { 6716 c = list_entry(list->next, struct CommandList, list); 6717 c->err_info->CommandStatus = CMD_HARDWARE_ERR; 6718 finish_cmd(c); 6719 } 6720 } 6721 6722 static void controller_lockup_detected(struct ctlr_info *h) 6723 { 6724 unsigned long flags; 6725 6726 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6727 spin_lock_irqsave(&h->lock, flags); 6728 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); 6729 spin_unlock_irqrestore(&h->lock, flags); 6730 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n", 6731 h->lockup_detected); 6732 pci_disable_device(h->pdev); 6733 spin_lock_irqsave(&h->lock, flags); 6734 fail_all_cmds_on_list(h, &h->cmpQ); 6735 fail_all_cmds_on_list(h, &h->reqQ); 6736 spin_unlock_irqrestore(&h->lock, flags); 6737 } 6738 6739 static void detect_controller_lockup(struct ctlr_info *h) 6740 { 6741 u64 now; 6742 u32 heartbeat; 6743 unsigned long flags; 6744 6745 now = get_jiffies_64(); 6746 /* If we've received an interrupt recently, we're ok. */ 6747 if (time_after64(h->last_intr_timestamp + 6748 (h->heartbeat_sample_interval), now)) 6749 return; 6750 6751 /* 6752 * If we've already checked the heartbeat recently, we're ok. 6753 * This could happen if someone sends us a signal. We 6754 * otherwise don't care about signals in this thread. 6755 */ 6756 if (time_after64(h->last_heartbeat_timestamp + 6757 (h->heartbeat_sample_interval), now)) 6758 return; 6759 6760 /* If heartbeat has not changed since we last looked, we're not ok. */ 6761 spin_lock_irqsave(&h->lock, flags); 6762 heartbeat = readl(&h->cfgtable->HeartBeat); 6763 spin_unlock_irqrestore(&h->lock, flags); 6764 if (h->last_heartbeat == heartbeat) { 6765 controller_lockup_detected(h); 6766 return; 6767 } 6768 6769 /* We're ok. */ 6770 h->last_heartbeat = heartbeat; 6771 h->last_heartbeat_timestamp = now; 6772 } 6773 6774 static void hpsa_ack_ctlr_events(struct ctlr_info *h) 6775 { 6776 int i; 6777 char *event_type; 6778 6779 /* Clear the driver-requested rescan flag */ 6780 h->drv_req_rescan = 0; 6781 6782 /* Ask the controller to clear the events we're handling. */ 6783 if ((h->transMethod & (CFGTBL_Trans_io_accel1 6784 | CFGTBL_Trans_io_accel2)) && 6785 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE || 6786 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) { 6787 6788 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE) 6789 event_type = "state change"; 6790 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE) 6791 event_type = "configuration change"; 6792 /* Stop sending new RAID offload reqs via the IO accelerator */ 6793 scsi_block_requests(h->scsi_host); 6794 for (i = 0; i < h->ndevices; i++) 6795 h->dev[i]->offload_enabled = 0; 6796 hpsa_drain_accel_commands(h); 6797 /* Set 'accelerator path config change' bit */ 6798 dev_warn(&h->pdev->dev, 6799 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n", 6800 h->events, event_type); 6801 writel(h->events, &(h->cfgtable->clear_event_notify)); 6802 /* Set the "clear event notify field update" bit 6 */ 6803 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); 6804 /* Wait until ctlr clears 'clear event notify field', bit 6 */ 6805 hpsa_wait_for_clear_event_notify_ack(h); 6806 scsi_unblock_requests(h->scsi_host); 6807 } else { 6808 /* Acknowledge controller notification events. */ 6809 writel(h->events, &(h->cfgtable->clear_event_notify)); 6810 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); 6811 hpsa_wait_for_clear_event_notify_ack(h); 6812 #if 0 6813 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 6814 hpsa_wait_for_mode_change_ack(h); 6815 #endif 6816 } 6817 return; 6818 } 6819 6820 /* Check a register on the controller to see if there are configuration 6821 * changes (added/changed/removed logical drives, etc.) which mean that 6822 * we should rescan the controller for devices. 6823 * Also check flag for driver-initiated rescan. 6824 */ 6825 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h) 6826 { 6827 if (h->drv_req_rescan) 6828 return 1; 6829 6830 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) 6831 return 0; 6832 6833 h->events = readl(&(h->cfgtable->event_notify)); 6834 return h->events & RESCAN_REQUIRED_EVENT_BITS; 6835 } 6836 6837 /* 6838 * Check if any of the offline devices have become ready 6839 */ 6840 static int hpsa_offline_devices_ready(struct ctlr_info *h) 6841 { 6842 unsigned long flags; 6843 struct offline_device_entry *d; 6844 struct list_head *this, *tmp; 6845 6846 spin_lock_irqsave(&h->offline_device_lock, flags); 6847 list_for_each_safe(this, tmp, &h->offline_device_list) { 6848 d = list_entry(this, struct offline_device_entry, 6849 offline_list); 6850 spin_unlock_irqrestore(&h->offline_device_lock, flags); 6851 if (!hpsa_volume_offline(h, d->scsi3addr)) 6852 return 1; 6853 spin_lock_irqsave(&h->offline_device_lock, flags); 6854 } 6855 spin_unlock_irqrestore(&h->offline_device_lock, flags); 6856 return 0; 6857 } 6858 6859 6860 static void hpsa_monitor_ctlr_worker(struct work_struct *work) 6861 { 6862 unsigned long flags; 6863 struct ctlr_info *h = container_of(to_delayed_work(work), 6864 struct ctlr_info, monitor_ctlr_work); 6865 detect_controller_lockup(h); 6866 if (h->lockup_detected) 6867 return; 6868 6869 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { 6870 scsi_host_get(h->scsi_host); 6871 h->drv_req_rescan = 0; 6872 hpsa_ack_ctlr_events(h); 6873 hpsa_scan_start(h->scsi_host); 6874 scsi_host_put(h->scsi_host); 6875 } 6876 6877 spin_lock_irqsave(&h->lock, flags); 6878 if (h->remove_in_progress) { 6879 spin_unlock_irqrestore(&h->lock, flags); 6880 return; 6881 } 6882 schedule_delayed_work(&h->monitor_ctlr_work, 6883 h->heartbeat_sample_interval); 6884 spin_unlock_irqrestore(&h->lock, flags); 6885 } 6886 6887 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 6888 { 6889 int dac, rc; 6890 struct ctlr_info *h; 6891 int try_soft_reset = 0; 6892 unsigned long flags; 6893 6894 if (number_of_controllers == 0) 6895 printk(KERN_INFO DRIVER_NAME "\n"); 6896 6897 rc = hpsa_init_reset_devices(pdev); 6898 if (rc) { 6899 if (rc != -ENOTSUPP) 6900 return rc; 6901 /* If the reset fails in a particular way (it has no way to do 6902 * a proper hard reset, so returns -ENOTSUPP) we can try to do 6903 * a soft reset once we get the controller configured up to the 6904 * point that it can accept a command. 6905 */ 6906 try_soft_reset = 1; 6907 rc = 0; 6908 } 6909 6910 reinit_after_soft_reset: 6911 6912 /* Command structures must be aligned on a 32-byte boundary because 6913 * the 5 lower bits of the address are used by the hardware. and by 6914 * the driver. See comments in hpsa.h for more info. 6915 */ 6916 #define COMMANDLIST_ALIGNMENT 128 6917 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); 6918 h = kzalloc(sizeof(*h), GFP_KERNEL); 6919 if (!h) 6920 return -ENOMEM; 6921 6922 h->pdev = pdev; 6923 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT; 6924 INIT_LIST_HEAD(&h->cmpQ); 6925 INIT_LIST_HEAD(&h->reqQ); 6926 INIT_LIST_HEAD(&h->offline_device_list); 6927 spin_lock_init(&h->lock); 6928 spin_lock_init(&h->offline_device_lock); 6929 spin_lock_init(&h->scan_lock); 6930 spin_lock_init(&h->passthru_count_lock); 6931 rc = hpsa_pci_init(h); 6932 if (rc != 0) 6933 goto clean1; 6934 6935 sprintf(h->devname, HPSA "%d", number_of_controllers); 6936 h->ctlr = number_of_controllers; 6937 number_of_controllers++; 6938 6939 /* configure PCI DMA stuff */ 6940 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 6941 if (rc == 0) { 6942 dac = 1; 6943 } else { 6944 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 6945 if (rc == 0) { 6946 dac = 0; 6947 } else { 6948 dev_err(&pdev->dev, "no suitable DMA available\n"); 6949 goto clean1; 6950 } 6951 } 6952 6953 /* make sure the board interrupts are off */ 6954 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6955 6956 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx)) 6957 goto clean2; 6958 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", 6959 h->devname, pdev->device, 6960 h->intr[h->intr_mode], dac ? "" : " not"); 6961 if (hpsa_allocate_cmd_pool(h)) 6962 goto clean4; 6963 if (hpsa_allocate_sg_chain_blocks(h)) 6964 goto clean4; 6965 init_waitqueue_head(&h->scan_wait_queue); 6966 h->scan_finished = 1; /* no scan currently in progress */ 6967 6968 pci_set_drvdata(pdev, h); 6969 h->ndevices = 0; 6970 h->hba_mode_enabled = 0; 6971 h->scsi_host = NULL; 6972 spin_lock_init(&h->devlock); 6973 hpsa_put_ctlr_into_performant_mode(h); 6974 6975 /* At this point, the controller is ready to take commands. 6976 * Now, if reset_devices and the hard reset didn't work, try 6977 * the soft reset and see if that works. 6978 */ 6979 if (try_soft_reset) { 6980 6981 /* This is kind of gross. We may or may not get a completion 6982 * from the soft reset command, and if we do, then the value 6983 * from the fifo may or may not be valid. So, we wait 10 secs 6984 * after the reset throwing away any completions we get during 6985 * that time. Unregister the interrupt handler and register 6986 * fake ones to scoop up any residual completions. 6987 */ 6988 spin_lock_irqsave(&h->lock, flags); 6989 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6990 spin_unlock_irqrestore(&h->lock, flags); 6991 free_irqs(h); 6992 rc = hpsa_request_irq(h, hpsa_msix_discard_completions, 6993 hpsa_intx_discard_completions); 6994 if (rc) { 6995 dev_warn(&h->pdev->dev, "Failed to request_irq after " 6996 "soft reset.\n"); 6997 goto clean4; 6998 } 6999 7000 rc = hpsa_kdump_soft_reset(h); 7001 if (rc) 7002 /* Neither hard nor soft reset worked, we're hosed. */ 7003 goto clean4; 7004 7005 dev_info(&h->pdev->dev, "Board READY.\n"); 7006 dev_info(&h->pdev->dev, 7007 "Waiting for stale completions to drain.\n"); 7008 h->access.set_intr_mask(h, HPSA_INTR_ON); 7009 msleep(10000); 7010 h->access.set_intr_mask(h, HPSA_INTR_OFF); 7011 7012 rc = controller_reset_failed(h->cfgtable); 7013 if (rc) 7014 dev_info(&h->pdev->dev, 7015 "Soft reset appears to have failed.\n"); 7016 7017 /* since the controller's reset, we have to go back and re-init 7018 * everything. Easiest to just forget what we've done and do it 7019 * all over again. 7020 */ 7021 hpsa_undo_allocations_after_kdump_soft_reset(h); 7022 try_soft_reset = 0; 7023 if (rc) 7024 /* don't go to clean4, we already unallocated */ 7025 return -ENODEV; 7026 7027 goto reinit_after_soft_reset; 7028 } 7029 7030 /* Enable Accelerated IO path at driver layer */ 7031 h->acciopath_status = 1; 7032 7033 h->drv_req_rescan = 0; 7034 7035 /* Turn the interrupts on so we can service requests */ 7036 h->access.set_intr_mask(h, HPSA_INTR_ON); 7037 7038 hpsa_hba_inquiry(h); 7039 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ 7040 7041 /* Monitor the controller for firmware lockups */ 7042 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; 7043 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); 7044 schedule_delayed_work(&h->monitor_ctlr_work, 7045 h->heartbeat_sample_interval); 7046 return 0; 7047 7048 clean4: 7049 hpsa_free_sg_chain_blocks(h); 7050 hpsa_free_cmd_pool(h); 7051 free_irqs(h); 7052 clean2: 7053 clean1: 7054 kfree(h); 7055 return rc; 7056 } 7057 7058 static void hpsa_flush_cache(struct ctlr_info *h) 7059 { 7060 char *flush_buf; 7061 struct CommandList *c; 7062 unsigned long flags; 7063 7064 /* Don't bother trying to flush the cache if locked up */ 7065 spin_lock_irqsave(&h->lock, flags); 7066 if (unlikely(h->lockup_detected)) { 7067 spin_unlock_irqrestore(&h->lock, flags); 7068 return; 7069 } 7070 spin_unlock_irqrestore(&h->lock, flags); 7071 7072 flush_buf = kzalloc(4, GFP_KERNEL); 7073 if (!flush_buf) 7074 return; 7075 7076 c = cmd_special_alloc(h); 7077 if (!c) { 7078 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 7079 goto out_of_memory; 7080 } 7081 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, 7082 RAID_CTLR_LUNID, TYPE_CMD)) { 7083 goto out; 7084 } 7085 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE); 7086 if (c->err_info->CommandStatus != 0) 7087 out: 7088 dev_warn(&h->pdev->dev, 7089 "error flushing cache on controller\n"); 7090 cmd_special_free(h, c); 7091 out_of_memory: 7092 kfree(flush_buf); 7093 } 7094 7095 static void hpsa_shutdown(struct pci_dev *pdev) 7096 { 7097 struct ctlr_info *h; 7098 7099 h = pci_get_drvdata(pdev); 7100 /* Turn board interrupts off and send the flush cache command 7101 * sendcmd will turn off interrupt, and send the flush... 7102 * To write all data in the battery backed cache to disks 7103 */ 7104 hpsa_flush_cache(h); 7105 h->access.set_intr_mask(h, HPSA_INTR_OFF); 7106 hpsa_free_irqs_and_disable_msix(h); 7107 } 7108 7109 static void hpsa_free_device_info(struct ctlr_info *h) 7110 { 7111 int i; 7112 7113 for (i = 0; i < h->ndevices; i++) 7114 kfree(h->dev[i]); 7115 } 7116 7117 static void hpsa_remove_one(struct pci_dev *pdev) 7118 { 7119 struct ctlr_info *h; 7120 unsigned long flags; 7121 7122 if (pci_get_drvdata(pdev) == NULL) { 7123 dev_err(&pdev->dev, "unable to remove device\n"); 7124 return; 7125 } 7126 h = pci_get_drvdata(pdev); 7127 7128 /* Get rid of any controller monitoring work items */ 7129 spin_lock_irqsave(&h->lock, flags); 7130 h->remove_in_progress = 1; 7131 cancel_delayed_work(&h->monitor_ctlr_work); 7132 spin_unlock_irqrestore(&h->lock, flags); 7133 7134 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ 7135 hpsa_shutdown(pdev); 7136 iounmap(h->vaddr); 7137 iounmap(h->transtable); 7138 iounmap(h->cfgtable); 7139 hpsa_free_device_info(h); 7140 hpsa_free_sg_chain_blocks(h); 7141 pci_free_consistent(h->pdev, 7142 h->nr_cmds * sizeof(struct CommandList), 7143 h->cmd_pool, h->cmd_pool_dhandle); 7144 pci_free_consistent(h->pdev, 7145 h->nr_cmds * sizeof(struct ErrorInfo), 7146 h->errinfo_pool, h->errinfo_pool_dhandle); 7147 pci_free_consistent(h->pdev, h->reply_pool_size, 7148 h->reply_pool, h->reply_pool_dhandle); 7149 kfree(h->cmd_pool_bits); 7150 kfree(h->blockFetchTable); 7151 kfree(h->ioaccel1_blockFetchTable); 7152 kfree(h->ioaccel2_blockFetchTable); 7153 kfree(h->hba_inquiry_data); 7154 pci_disable_device(pdev); 7155 pci_release_regions(pdev); 7156 kfree(h); 7157 } 7158 7159 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev, 7160 __attribute__((unused)) pm_message_t state) 7161 { 7162 return -ENOSYS; 7163 } 7164 7165 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev) 7166 { 7167 return -ENOSYS; 7168 } 7169 7170 static struct pci_driver hpsa_pci_driver = { 7171 .name = HPSA, 7172 .probe = hpsa_init_one, 7173 .remove = hpsa_remove_one, 7174 .id_table = hpsa_pci_device_id, /* id_table */ 7175 .shutdown = hpsa_shutdown, 7176 .suspend = hpsa_suspend, 7177 .resume = hpsa_resume, 7178 }; 7179 7180 /* Fill in bucket_map[], given nsgs (the max number of 7181 * scatter gather elements supported) and bucket[], 7182 * which is an array of 8 integers. The bucket[] array 7183 * contains 8 different DMA transfer sizes (in 16 7184 * byte increments) which the controller uses to fetch 7185 * commands. This function fills in bucket_map[], which 7186 * maps a given number of scatter gather elements to one of 7187 * the 8 DMA transfer sizes. The point of it is to allow the 7188 * controller to only do as much DMA as needed to fetch the 7189 * command, with the DMA transfer size encoded in the lower 7190 * bits of the command address. 7191 */ 7192 static void calc_bucket_map(int bucket[], int num_buckets, 7193 int nsgs, int min_blocks, int *bucket_map) 7194 { 7195 int i, j, b, size; 7196 7197 /* Note, bucket_map must have nsgs+1 entries. */ 7198 for (i = 0; i <= nsgs; i++) { 7199 /* Compute size of a command with i SG entries */ 7200 size = i + min_blocks; 7201 b = num_buckets; /* Assume the biggest bucket */ 7202 /* Find the bucket that is just big enough */ 7203 for (j = 0; j < num_buckets; j++) { 7204 if (bucket[j] >= size) { 7205 b = j; 7206 break; 7207 } 7208 } 7209 /* for a command with i SG entries, use bucket b. */ 7210 bucket_map[i] = b; 7211 } 7212 } 7213 7214 static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) 7215 { 7216 int i; 7217 unsigned long register_value; 7218 unsigned long transMethod = CFGTBL_Trans_Performant | 7219 (trans_support & CFGTBL_Trans_use_short_tags) | 7220 CFGTBL_Trans_enable_directed_msix | 7221 (trans_support & (CFGTBL_Trans_io_accel1 | 7222 CFGTBL_Trans_io_accel2)); 7223 struct access_method access = SA5_performant_access; 7224 7225 /* This is a bit complicated. There are 8 registers on 7226 * the controller which we write to to tell it 8 different 7227 * sizes of commands which there may be. It's a way of 7228 * reducing the DMA done to fetch each command. Encoded into 7229 * each command's tag are 3 bits which communicate to the controller 7230 * which of the eight sizes that command fits within. The size of 7231 * each command depends on how many scatter gather entries there are. 7232 * Each SG entry requires 16 bytes. The eight registers are programmed 7233 * with the number of 16-byte blocks a command of that size requires. 7234 * The smallest command possible requires 5 such 16 byte blocks. 7235 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte 7236 * blocks. Note, this only extends to the SG entries contained 7237 * within the command block, and does not extend to chained blocks 7238 * of SG elements. bft[] contains the eight values we write to 7239 * the registers. They are not evenly distributed, but have more 7240 * sizes for small commands, and fewer sizes for larger commands. 7241 */ 7242 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4}; 7243 #define MIN_IOACCEL2_BFT_ENTRY 5 7244 #define HPSA_IOACCEL2_HEADER_SZ 4 7245 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12, 7246 13, 14, 15, 16, 17, 18, 19, 7247 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES}; 7248 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16); 7249 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8); 7250 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) > 7251 16 * MIN_IOACCEL2_BFT_ENTRY); 7252 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16); 7253 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4); 7254 /* 5 = 1 s/g entry or 4k 7255 * 6 = 2 s/g entry or 8k 7256 * 8 = 4 s/g entry or 16k 7257 * 10 = 6 s/g entry or 24k 7258 */ 7259 7260 /* Controller spec: zero out this buffer. */ 7261 memset(h->reply_pool, 0, h->reply_pool_size); 7262 7263 bft[7] = SG_ENTRIES_IN_CMD + 4; 7264 calc_bucket_map(bft, ARRAY_SIZE(bft), 7265 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable); 7266 for (i = 0; i < 8; i++) 7267 writel(bft[i], &h->transtable->BlockFetch[i]); 7268 7269 /* size of controller ring buffer */ 7270 writel(h->max_commands, &h->transtable->RepQSize); 7271 writel(h->nreply_queues, &h->transtable->RepQCount); 7272 writel(0, &h->transtable->RepQCtrAddrLow32); 7273 writel(0, &h->transtable->RepQCtrAddrHigh32); 7274 7275 for (i = 0; i < h->nreply_queues; i++) { 7276 writel(0, &h->transtable->RepQAddr[i].upper); 7277 writel(h->reply_pool_dhandle + 7278 (h->max_commands * sizeof(u64) * i), 7279 &h->transtable->RepQAddr[i].lower); 7280 } 7281 7282 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); 7283 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest)); 7284 /* 7285 * enable outbound interrupt coalescing in accelerator mode; 7286 */ 7287 if (trans_support & CFGTBL_Trans_io_accel1) { 7288 access = SA5_ioaccel_mode1_access; 7289 writel(10, &h->cfgtable->HostWrite.CoalIntDelay); 7290 writel(4, &h->cfgtable->HostWrite.CoalIntCount); 7291 } else { 7292 if (trans_support & CFGTBL_Trans_io_accel2) { 7293 access = SA5_ioaccel_mode2_access; 7294 writel(10, &h->cfgtable->HostWrite.CoalIntDelay); 7295 writel(4, &h->cfgtable->HostWrite.CoalIntCount); 7296 } 7297 } 7298 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 7299 hpsa_wait_for_mode_change_ack(h); 7300 register_value = readl(&(h->cfgtable->TransportActive)); 7301 if (!(register_value & CFGTBL_Trans_Performant)) { 7302 dev_warn(&h->pdev->dev, "unable to get board into" 7303 " performant mode\n"); 7304 return; 7305 } 7306 /* Change the access methods to the performant access methods */ 7307 h->access = access; 7308 h->transMethod = transMethod; 7309 7310 if (!((trans_support & CFGTBL_Trans_io_accel1) || 7311 (trans_support & CFGTBL_Trans_io_accel2))) 7312 return; 7313 7314 if (trans_support & CFGTBL_Trans_io_accel1) { 7315 /* Set up I/O accelerator mode */ 7316 for (i = 0; i < h->nreply_queues; i++) { 7317 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX); 7318 h->reply_queue[i].current_entry = 7319 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX); 7320 } 7321 bft[7] = h->ioaccel_maxsg + 8; 7322 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8, 7323 h->ioaccel1_blockFetchTable); 7324 7325 /* initialize all reply queue entries to unused */ 7326 memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED, 7327 h->reply_pool_size); 7328 7329 /* set all the constant fields in the accelerator command 7330 * frames once at init time to save CPU cycles later. 7331 */ 7332 for (i = 0; i < h->nr_cmds; i++) { 7333 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i]; 7334 7335 cp->function = IOACCEL1_FUNCTION_SCSIIO; 7336 cp->err_info = (u32) (h->errinfo_pool_dhandle + 7337 (i * sizeof(struct ErrorInfo))); 7338 cp->err_info_len = sizeof(struct ErrorInfo); 7339 cp->sgl_offset = IOACCEL1_SGLOFFSET; 7340 cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT; 7341 cp->timeout_sec = 0; 7342 cp->ReplyQueue = 0; 7343 cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) | 7344 DIRECT_LOOKUP_BIT; 7345 cp->Tag.upper = 0; 7346 cp->host_addr.lower = 7347 (u32) (h->ioaccel_cmd_pool_dhandle + 7348 (i * sizeof(struct io_accel1_cmd))); 7349 cp->host_addr.upper = 0; 7350 } 7351 } else if (trans_support & CFGTBL_Trans_io_accel2) { 7352 u64 cfg_offset, cfg_base_addr_index; 7353 u32 bft2_offset, cfg_base_addr; 7354 int rc; 7355 7356 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, 7357 &cfg_base_addr_index, &cfg_offset); 7358 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64); 7359 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ; 7360 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg, 7361 4, h->ioaccel2_blockFetchTable); 7362 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset); 7363 BUILD_BUG_ON(offsetof(struct CfgTable, 7364 io_accel_request_size_offset) != 0xb8); 7365 h->ioaccel2_bft2_regs = 7366 remap_pci_mem(pci_resource_start(h->pdev, 7367 cfg_base_addr_index) + 7368 cfg_offset + bft2_offset, 7369 ARRAY_SIZE(bft2) * 7370 sizeof(*h->ioaccel2_bft2_regs)); 7371 for (i = 0; i < ARRAY_SIZE(bft2); i++) 7372 writel(bft2[i], &h->ioaccel2_bft2_regs[i]); 7373 } 7374 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 7375 hpsa_wait_for_mode_change_ack(h); 7376 } 7377 7378 static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h) 7379 { 7380 h->ioaccel_maxsg = 7381 readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); 7382 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES) 7383 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES; 7384 7385 /* Command structures must be aligned on a 128-byte boundary 7386 * because the 7 lower bits of the address are used by the 7387 * hardware. 7388 */ 7389 #define IOACCEL1_COMMANDLIST_ALIGNMENT 128 7390 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) % 7391 IOACCEL1_COMMANDLIST_ALIGNMENT); 7392 h->ioaccel_cmd_pool = 7393 pci_alloc_consistent(h->pdev, 7394 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), 7395 &(h->ioaccel_cmd_pool_dhandle)); 7396 7397 h->ioaccel1_blockFetchTable = 7398 kmalloc(((h->ioaccel_maxsg + 1) * 7399 sizeof(u32)), GFP_KERNEL); 7400 7401 if ((h->ioaccel_cmd_pool == NULL) || 7402 (h->ioaccel1_blockFetchTable == NULL)) 7403 goto clean_up; 7404 7405 memset(h->ioaccel_cmd_pool, 0, 7406 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool)); 7407 return 0; 7408 7409 clean_up: 7410 if (h->ioaccel_cmd_pool) 7411 pci_free_consistent(h->pdev, 7412 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), 7413 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); 7414 kfree(h->ioaccel1_blockFetchTable); 7415 return 1; 7416 } 7417 7418 static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h) 7419 { 7420 /* Allocate ioaccel2 mode command blocks and block fetch table */ 7421 7422 h->ioaccel_maxsg = 7423 readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); 7424 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES) 7425 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES; 7426 7427 #define IOACCEL2_COMMANDLIST_ALIGNMENT 128 7428 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) % 7429 IOACCEL2_COMMANDLIST_ALIGNMENT); 7430 h->ioaccel2_cmd_pool = 7431 pci_alloc_consistent(h->pdev, 7432 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), 7433 &(h->ioaccel2_cmd_pool_dhandle)); 7434 7435 h->ioaccel2_blockFetchTable = 7436 kmalloc(((h->ioaccel_maxsg + 1) * 7437 sizeof(u32)), GFP_KERNEL); 7438 7439 if ((h->ioaccel2_cmd_pool == NULL) || 7440 (h->ioaccel2_blockFetchTable == NULL)) 7441 goto clean_up; 7442 7443 memset(h->ioaccel2_cmd_pool, 0, 7444 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool)); 7445 return 0; 7446 7447 clean_up: 7448 if (h->ioaccel2_cmd_pool) 7449 pci_free_consistent(h->pdev, 7450 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), 7451 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle); 7452 kfree(h->ioaccel2_blockFetchTable); 7453 return 1; 7454 } 7455 7456 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) 7457 { 7458 u32 trans_support; 7459 unsigned long transMethod = CFGTBL_Trans_Performant | 7460 CFGTBL_Trans_use_short_tags; 7461 int i; 7462 7463 if (hpsa_simple_mode) 7464 return; 7465 7466 /* Check for I/O accelerator mode support */ 7467 if (trans_support & CFGTBL_Trans_io_accel1) { 7468 transMethod |= CFGTBL_Trans_io_accel1 | 7469 CFGTBL_Trans_enable_directed_msix; 7470 if (hpsa_alloc_ioaccel_cmd_and_bft(h)) 7471 goto clean_up; 7472 } else { 7473 if (trans_support & CFGTBL_Trans_io_accel2) { 7474 transMethod |= CFGTBL_Trans_io_accel2 | 7475 CFGTBL_Trans_enable_directed_msix; 7476 if (ioaccel2_alloc_cmds_and_bft(h)) 7477 goto clean_up; 7478 } 7479 } 7480 7481 /* TODO, check that this next line h->nreply_queues is correct */ 7482 trans_support = readl(&(h->cfgtable->TransportSupport)); 7483 if (!(trans_support & PERFORMANT_MODE)) 7484 return; 7485 7486 h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1; 7487 hpsa_get_max_perf_mode_cmds(h); 7488 /* Performant mode ring buffer and supporting data structures */ 7489 h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues; 7490 h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size, 7491 &(h->reply_pool_dhandle)); 7492 7493 for (i = 0; i < h->nreply_queues; i++) { 7494 h->reply_queue[i].head = &h->reply_pool[h->max_commands * i]; 7495 h->reply_queue[i].size = h->max_commands; 7496 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */ 7497 h->reply_queue[i].current_entry = 0; 7498 } 7499 7500 /* Need a block fetch table for performant mode */ 7501 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) * 7502 sizeof(u32)), GFP_KERNEL); 7503 7504 if ((h->reply_pool == NULL) 7505 || (h->blockFetchTable == NULL)) 7506 goto clean_up; 7507 7508 hpsa_enter_performant_mode(h, trans_support); 7509 return; 7510 7511 clean_up: 7512 if (h->reply_pool) 7513 pci_free_consistent(h->pdev, h->reply_pool_size, 7514 h->reply_pool, h->reply_pool_dhandle); 7515 kfree(h->blockFetchTable); 7516 } 7517 7518 static int is_accelerated_cmd(struct CommandList *c) 7519 { 7520 return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2; 7521 } 7522 7523 static void hpsa_drain_accel_commands(struct ctlr_info *h) 7524 { 7525 struct CommandList *c = NULL; 7526 unsigned long flags; 7527 int accel_cmds_out; 7528 7529 do { /* wait for all outstanding commands to drain out */ 7530 accel_cmds_out = 0; 7531 spin_lock_irqsave(&h->lock, flags); 7532 list_for_each_entry(c, &h->cmpQ, list) 7533 accel_cmds_out += is_accelerated_cmd(c); 7534 list_for_each_entry(c, &h->reqQ, list) 7535 accel_cmds_out += is_accelerated_cmd(c); 7536 spin_unlock_irqrestore(&h->lock, flags); 7537 if (accel_cmds_out <= 0) 7538 break; 7539 msleep(100); 7540 } while (1); 7541 } 7542 7543 /* 7544 * This is it. Register the PCI driver information for the cards we control 7545 * the OS will call our registered routines when it finds one of our cards. 7546 */ 7547 static int __init hpsa_init(void) 7548 { 7549 return pci_register_driver(&hpsa_pci_driver); 7550 } 7551 7552 static void __exit hpsa_cleanup(void) 7553 { 7554 pci_unregister_driver(&hpsa_pci_driver); 7555 } 7556 7557 static void __attribute__((unused)) verify_offsets(void) 7558 { 7559 #define VERIFY_OFFSET(member, offset) \ 7560 BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset) 7561 7562 VERIFY_OFFSET(structure_size, 0); 7563 VERIFY_OFFSET(volume_blk_size, 4); 7564 VERIFY_OFFSET(volume_blk_cnt, 8); 7565 VERIFY_OFFSET(phys_blk_shift, 16); 7566 VERIFY_OFFSET(parity_rotation_shift, 17); 7567 VERIFY_OFFSET(strip_size, 18); 7568 VERIFY_OFFSET(disk_starting_blk, 20); 7569 VERIFY_OFFSET(disk_blk_cnt, 28); 7570 VERIFY_OFFSET(data_disks_per_row, 36); 7571 VERIFY_OFFSET(metadata_disks_per_row, 38); 7572 VERIFY_OFFSET(row_cnt, 40); 7573 VERIFY_OFFSET(layout_map_count, 42); 7574 VERIFY_OFFSET(flags, 44); 7575 VERIFY_OFFSET(dekindex, 46); 7576 /* VERIFY_OFFSET(reserved, 48 */ 7577 VERIFY_OFFSET(data, 64); 7578 7579 #undef VERIFY_OFFSET 7580 7581 #define VERIFY_OFFSET(member, offset) \ 7582 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset) 7583 7584 VERIFY_OFFSET(IU_type, 0); 7585 VERIFY_OFFSET(direction, 1); 7586 VERIFY_OFFSET(reply_queue, 2); 7587 /* VERIFY_OFFSET(reserved1, 3); */ 7588 VERIFY_OFFSET(scsi_nexus, 4); 7589 VERIFY_OFFSET(Tag, 8); 7590 VERIFY_OFFSET(cdb, 16); 7591 VERIFY_OFFSET(cciss_lun, 32); 7592 VERIFY_OFFSET(data_len, 40); 7593 VERIFY_OFFSET(cmd_priority_task_attr, 44); 7594 VERIFY_OFFSET(sg_count, 45); 7595 /* VERIFY_OFFSET(reserved3 */ 7596 VERIFY_OFFSET(err_ptr, 48); 7597 VERIFY_OFFSET(err_len, 56); 7598 /* VERIFY_OFFSET(reserved4 */ 7599 VERIFY_OFFSET(sg, 64); 7600 7601 #undef VERIFY_OFFSET 7602 7603 #define VERIFY_OFFSET(member, offset) \ 7604 BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset) 7605 7606 VERIFY_OFFSET(dev_handle, 0x00); 7607 VERIFY_OFFSET(reserved1, 0x02); 7608 VERIFY_OFFSET(function, 0x03); 7609 VERIFY_OFFSET(reserved2, 0x04); 7610 VERIFY_OFFSET(err_info, 0x0C); 7611 VERIFY_OFFSET(reserved3, 0x10); 7612 VERIFY_OFFSET(err_info_len, 0x12); 7613 VERIFY_OFFSET(reserved4, 0x13); 7614 VERIFY_OFFSET(sgl_offset, 0x14); 7615 VERIFY_OFFSET(reserved5, 0x15); 7616 VERIFY_OFFSET(transfer_len, 0x1C); 7617 VERIFY_OFFSET(reserved6, 0x20); 7618 VERIFY_OFFSET(io_flags, 0x24); 7619 VERIFY_OFFSET(reserved7, 0x26); 7620 VERIFY_OFFSET(LUN, 0x34); 7621 VERIFY_OFFSET(control, 0x3C); 7622 VERIFY_OFFSET(CDB, 0x40); 7623 VERIFY_OFFSET(reserved8, 0x50); 7624 VERIFY_OFFSET(host_context_flags, 0x60); 7625 VERIFY_OFFSET(timeout_sec, 0x62); 7626 VERIFY_OFFSET(ReplyQueue, 0x64); 7627 VERIFY_OFFSET(reserved9, 0x65); 7628 VERIFY_OFFSET(Tag, 0x68); 7629 VERIFY_OFFSET(host_addr, 0x70); 7630 VERIFY_OFFSET(CISS_LUN, 0x78); 7631 VERIFY_OFFSET(SG, 0x78 + 8); 7632 #undef VERIFY_OFFSET 7633 } 7634 7635 module_init(hpsa_init); 7636 module_exit(hpsa_cleanup); 7637