/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.0-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
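	/* an all-zero entry terminates the list */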
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array", &SA5_access},
	{0x21BE103C, "Smart Array", &SA5_access},
	{0x21BF103C, "Smart Array", &SA5_access},
	{0x21C0103C, "Smart Array", &SA5_access},
	{0x21C1103C, "Smart Array", &SA5_access},
	{0x21C2103C, "Smart Array", &SA5_access},
	{0x21C3103C, "Smart Array", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array", &SA5_access},
	{0x21C7103C, "Smart Array", &SA5_access},
	{0x21C8103C, "Smart Array", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
#define VPD_PAGE (1 << 8)

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, int *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev, HPSA "%d: LUN failure "
			"detected, action required\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: report LUN data "
			"changed, action required\n", h->ctlr);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, HPSA "%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, HPSA "%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ?  "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* SmartArray P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"1(ADM)", "UNKNOWN"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	NULL,
};

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs		= hpsa_sdev_attrs,
	.shost_attrs		= hpsa_shost_attrs,
	.max_sectors		= 8192,
	.no_write_same		= 1,
};


/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
	list_add_tail(&c->list, list);
}

static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_pool *rq = &h->reply_queue[q];
	unsigned long flags;

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		spin_lock_irqsave(&h->lock, flags);
		h->commands_outstanding--;
		spin_unlock_irqrestore(&h->lock, flags);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 *   (command type is needed because ioaccel1 mode
 *    commands are submitted through the same register as normal
 *    mode commands, so this is how the controller knows whether
 *    the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 *   a separate special register for submitting commands.
 */
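/*
 * Example (see set_performant_mode() below): a command whose block fetch
 * table entry is 2 gets busaddr |= (1 | (2 << 1)), i.e. the performant-mode
 * bit in bit 0 and the pull count in bits 1-3.
 */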
/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (likely(h->msix_vector > 0))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
	}
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
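/* Lockup detector sampling intervals, in jiffies */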
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c);
		break;
	default:
		set_performant_mode(h, c);
	}
	dial_down_lockup_detection_during_fw_flash(h, c);
	spin_lock_irqsave(&h->lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	spin_unlock_irqrestore(&h->lock, flags);
	start_io(h);
}

static inline void removeQ(struct CommandList *c)
{
	if (WARN_ON(list_empty(&c->list)))
		return;
	list_del_init(&c->list);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
			scsi_device_type(device->devtype), hostno,
			device->bus, device->target, device->lun);
	return 0;
}

/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed. */
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_enabled = new_entry->offload_enabled;
	h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->raid_map = new_entry->raid_map;

	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}

#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}

static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */

	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
			h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * future cmds to this device will get selection
			 * timeout as if the device was gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				" for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (sd != NULL)
		sdev->hostdata = sd;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}

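/*
 * Free the per-command SG chain blocks allocated by
 * hpsa_allocate_sg_chain_blocks() below.
 */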
static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
				GFP_KERNEL);
	if (!h->cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
						h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}

static int hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = HPSA_SG_CHAIN;
	chain_sg->Len = sizeof(*chain_sg) *
	    (c->Header.SGTotal - h->max_cmd_sg_entries);
	temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		chain_sg->Addr.lower = 0;
		chain_sg->Addr.upper = 0;
		return -1;
	}
	chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
	chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
	return 0;
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;
	union u64bit temp64;

	if (c->Header.SGTotal <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	temp64.val32.lower = chain_sg->Addr.lower;
	temp64.val32.upper = chain_sg->Addr.upper;
	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}


/* Decode the various types of errors on ioaccel2 path.
 * Return 1 for any error that should generate a RAID path retry.
 * Return 0 for errors that don't require a RAID path retry.
 */
static int handle_ioaccel_mode2_error(struct ctlr_info *h,
					struct CommandList *c,
					struct scsi_cmnd *cmd,
					struct io_accel2_cmd *c2)
{
	int data_len;
	int retry = 0;

	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_COMPLETE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
			dev_warn(&h->pdev->dev,
				"%s: task complete with check condition.\n",
				"HP SSD Smart Path");
			if (c2->error_data.data_present !=
					IOACCEL2_SENSE_DATA_PRESENT)
				break;
			/* copy the sense data */
			data_len = c2->error_data.sense_data_len;
			if (data_len > SCSI_SENSE_BUFFERSIZE)
				data_len = SCSI_SENSE_BUFFERSIZE;
			if (data_len > sizeof(c2->error_data.sense_data_buff))
				data_len =
					sizeof(c2->error_data.sense_data_buff);
			memcpy(cmd->sense_buffer,
				c2->error_data.sense_data_buff, data_len);
			cmd->result |= SAM_STAT_CHECK_CONDITION;
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
			dev_warn(&h->pdev->dev,
				"%s: task complete with BUSY status.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
			dev_warn(&h->pdev->dev,
				"%s: task complete with reservation conflict.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
			/* Make scsi midlayer do unlimited retries */
			cmd->result = DID_IMM_RETRY << 16;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
			dev_warn(&h->pdev->dev,
				"%s: task complete with aborted status.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		default:
			dev_warn(&h->pdev->dev,
				"%s: task complete with unrecognized status: 0x%02x\n",
				"HP SSD Smart Path", c2->error_data.status);
			retry = 1;
			break;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_FAILURE:
		/* don't expect to get here. */
		dev_warn(&h->pdev->dev,
			"unexpected delivery or target failure, status = 0x%02x\n",
			c2->error_data.status);
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
		dev_warn(&h->pdev->dev, "task management function rejected.\n");
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: Unrecognized server response: 0x%02x\n",
			"HP SSD Smart Path",
			c2->error_data.serv_response);
		retry = 1;
		break;
	}

	return retry;	/* retry on raid path? */
}

static void process_ioaccel2_completion(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		struct hpsa_scsi_dev_t *dev)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	int raid_retry = 0;

	/* check for good status */
	if (likely(c2->error_data.serv_response == 0 &&
			c2->error_data.status == 0)) {
		cmd_free(h, c);
		cmd->scsi_done(cmd);
		return;
	}

	/* Any RAID offload error results in retry which will use
	 * the normal I/O path so the controller can handle whatever's
	 * wrong.
	 */
	if (is_logical_dev_addr_mode(dev->scsi3addr) &&
		c2->error_data.serv_response ==
			IOACCEL2_SERV_RESPONSE_FAILURE) {
		if (c2->error_data.status ==
			IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
			dev_warn(&h->pdev->dev,
				"%s: Path is unavailable, retrying on standard path.\n",
				"HP SSD Smart Path");
		else
			dev_warn(&h->pdev->dev,
				"%s: Error 0x%02x, retrying on standard path.\n",
				"HP SSD Smart Path", c2->error_data.status);

		dev->offload_enabled = 0;
		h->drv_req_rescan = 1;	/* schedule controller for a rescan */
		cmd->result = DID_SOFT_ERROR << 16;
		cmd_free(h, c);
		cmd->scsi_done(cmd);
		return;
	}
	raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2);
	/* If error found, disable Smart Path, schedule a rescan,
	 * and force a retry on the standard path.
	 */
	if (raid_retry) {
		dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n",
			"HP SSD Smart Path");
		dev->offload_enabled = 0; /* Disable Smart Path */
		h->drv_req_rescan = 1;	/* schedule controller rescan */
		cmd->result = DID_SOFT_ERROR << 16;
	}
	cmd_free(h, c);
	cmd->scsi_done(cmd);
}

static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;
	struct hpsa_scsi_dev_t *dev;

	unsigned char sense_key;
	unsigned char asc;      /* additional sense code */
	unsigned char ascq;     /* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
	h = cp->h;
	dev = cmd->device->hostdata;

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if ((cp->cmd_type == CMD_SCSI) &&
		(cp->Header.SGTotal > h->max_cmd_sg_entries))
		hpsa_unmap_sg_chain_block(h, cp);

	cmd->result = (DID_OK << 16); 		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */

	if (cp->cmd_type == CMD_IOACCEL2)
		return process_ioaccel2_completion(h, cp, cmd, dev);

	cmd->result |= ei->ScsiStatus;

	/* copy the sense data whether we need to or not. */
	if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
		sense_data_size = SCSI_SENSE_BUFFERSIZE;
	else
		sense_data_size = sizeof(ei->SenseInfo);
	if (ei->SenseLen < sense_data_size)
		sense_data_size = ei->SenseLen;

	memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
	scsi_set_resid(cmd, ei->ResidualCnt);

	if (ei->CommandStatus == 0) {
		cmd_free(h, cp);
		cmd->scsi_done(cmd);
		return;
	}

	/* For I/O accelerator commands, copy over some fields to the normal
	 * CISS header used below for error handling.
	 */
	if (cp->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
		cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd);
		cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK;
		cp->Header.Tag.lower = c->Tag.lower;
		cp->Header.Tag.upper = c->Tag.upper;
		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);

		/* Any RAID offload error results in retry which will use
		 * the normal I/O path so the controller can handle whatever's
		 * wrong.
		 */
1567 */ 1568 if (is_logical_dev_addr_mode(dev->scsi3addr)) { 1569 if (ei->CommandStatus == CMD_IOACCEL_DISABLED) 1570 dev->offload_enabled = 0; 1571 cmd->result = DID_SOFT_ERROR << 16; 1572 cmd_free(h, cp); 1573 cmd->scsi_done(cmd); 1574 return; 1575 } 1576 } 1577 1578 /* an error has occurred */ 1579 switch (ei->CommandStatus) { 1580 1581 case CMD_TARGET_STATUS: 1582 if (ei->ScsiStatus) { 1583 /* Get sense key */ 1584 sense_key = 0xf & ei->SenseInfo[2]; 1585 /* Get additional sense code */ 1586 asc = ei->SenseInfo[12]; 1587 /* Get addition sense code qualifier */ 1588 ascq = ei->SenseInfo[13]; 1589 } 1590 1591 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { 1592 if (check_for_unit_attention(h, cp)) 1593 break; 1594 if (sense_key == ILLEGAL_REQUEST) { 1595 /* 1596 * SCSI REPORT_LUNS is commonly unsupported on 1597 * Smart Array. Suppress noisy complaint. 1598 */ 1599 if (cp->Request.CDB[0] == REPORT_LUNS) 1600 break; 1601 1602 /* If ASC/ASCQ indicate Logical Unit 1603 * Not Supported condition, 1604 */ 1605 if ((asc == 0x25) && (ascq == 0x0)) { 1606 dev_warn(&h->pdev->dev, "cp %p " 1607 "has check condition\n", cp); 1608 break; 1609 } 1610 } 1611 1612 if (sense_key == NOT_READY) { 1613 /* If Sense is Not Ready, Logical Unit 1614 * Not ready, Manual Intervention 1615 * required 1616 */ 1617 if ((asc == 0x04) && (ascq == 0x03)) { 1618 dev_warn(&h->pdev->dev, "cp %p " 1619 "has check condition: unit " 1620 "not ready, manual " 1621 "intervention required\n", cp); 1622 break; 1623 } 1624 } 1625 if (sense_key == ABORTED_COMMAND) { 1626 /* Aborted command is retryable */ 1627 dev_warn(&h->pdev->dev, "cp %p " 1628 "has check condition: aborted command: " 1629 "ASC: 0x%x, ASCQ: 0x%x\n", 1630 cp, asc, ascq); 1631 cmd->result |= DID_SOFT_ERROR << 16; 1632 break; 1633 } 1634 /* Must be some other type of check condition */ 1635 dev_dbg(&h->pdev->dev, "cp %p has check condition: " 1636 "unknown type: " 1637 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " 1638 "Returning result: 0x%x, " 1639 "cmd=[%02x %02x %02x %02x %02x " 1640 "%02x %02x %02x %02x %02x %02x " 1641 "%02x %02x %02x %02x %02x]\n", 1642 cp, sense_key, asc, ascq, 1643 cmd->result, 1644 cmd->cmnd[0], cmd->cmnd[1], 1645 cmd->cmnd[2], cmd->cmnd[3], 1646 cmd->cmnd[4], cmd->cmnd[5], 1647 cmd->cmnd[6], cmd->cmnd[7], 1648 cmd->cmnd[8], cmd->cmnd[9], 1649 cmd->cmnd[10], cmd->cmnd[11], 1650 cmd->cmnd[12], cmd->cmnd[13], 1651 cmd->cmnd[14], cmd->cmnd[15]); 1652 break; 1653 } 1654 1655 1656 /* Problem was not a check condition 1657 * Pass it up to the upper layers... 1658 */ 1659 if (ei->ScsiStatus) { 1660 dev_warn(&h->pdev->dev, "cp %p has status 0x%x " 1661 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " 1662 "Returning result: 0x%x\n", 1663 cp, ei->ScsiStatus, 1664 sense_key, asc, ascq, 1665 cmd->result); 1666 } else { /* scsi status is zero??? How??? */ 1667 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. " 1668 "Returning no connection.\n", cp), 1669 1670 /* Ordinarily, this case should never happen, 1671 * but there is a bug in some released firmware 1672 * revisions that allows it to happen if, for 1673 * example, a 4100 backplane loses power and 1674 * the tape drive is in it. We assume that 1675 * it's a fatal error of some kind because we 1676 * can't show that it wasn't. We will make it 1677 * look like selection timeout since that is 1678 * the most common reason for this to occur, 1679 * and it's severe enough. 1680 */ 1681 1682 cmd->result = DID_NO_CONNECT << 16; 1683 } 1684 break; 1685 1686 case CMD_DATA_UNDERRUN: /* let mid layer handle it. 
*/ 1687 break; 1688 case CMD_DATA_OVERRUN: 1689 dev_warn(&h->pdev->dev, "cp %p has" 1690 " completed with data overrun " 1691 "reported\n", cp); 1692 break; 1693 case CMD_INVALID: { 1694 /* print_bytes(cp, sizeof(*cp), 1, 0); 1695 print_cmd(cp); */ 1696 /* We get CMD_INVALID if you address a non-existent device 1697 * instead of a selection timeout (no response). You will 1698 * see this if you yank out a drive, then try to access it. 1699 * This is kind of a shame because it means that any other 1700 * CMD_INVALID (e.g. driver bug) will get interpreted as a 1701 * missing target. */ 1702 cmd->result = DID_NO_CONNECT << 16; 1703 } 1704 break; 1705 case CMD_PROTOCOL_ERR: 1706 cmd->result = DID_ERROR << 16; 1707 dev_warn(&h->pdev->dev, "cp %p has " 1708 "protocol error\n", cp); 1709 break; 1710 case CMD_HARDWARE_ERR: 1711 cmd->result = DID_ERROR << 16; 1712 dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp); 1713 break; 1714 case CMD_CONNECTION_LOST: 1715 cmd->result = DID_ERROR << 16; 1716 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp); 1717 break; 1718 case CMD_ABORTED: 1719 cmd->result = DID_ABORT << 16; 1720 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n", 1721 cp, ei->ScsiStatus); 1722 break; 1723 case CMD_ABORT_FAILED: 1724 cmd->result = DID_ERROR << 16; 1725 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp); 1726 break; 1727 case CMD_UNSOLICITED_ABORT: 1728 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */ 1729 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited " 1730 "abort\n", cp); 1731 break; 1732 case CMD_TIMEOUT: 1733 cmd->result = DID_TIME_OUT << 16; 1734 dev_warn(&h->pdev->dev, "cp %p timedout\n", cp); 1735 break; 1736 case CMD_UNABORTABLE: 1737 cmd->result = DID_ERROR << 16; 1738 dev_warn(&h->pdev->dev, "Command unabortable\n"); 1739 break; 1740 case CMD_IOACCEL_DISABLED: 1741 /* This only handles the direct pass-through case since RAID 1742 * offload is handled above. Just attempt a retry. 
1743 */ 1744 cmd->result = DID_SOFT_ERROR << 16; 1745 dev_warn(&h->pdev->dev, 1746 "cp %p had HP SSD Smart Path error\n", cp); 1747 break; 1748 default: 1749 cmd->result = DID_ERROR << 16; 1750 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", 1751 cp, ei->CommandStatus); 1752 } 1753 cmd_free(h, cp); 1754 cmd->scsi_done(cmd); 1755 } 1756 1757 static void hpsa_pci_unmap(struct pci_dev *pdev, 1758 struct CommandList *c, int sg_used, int data_direction) 1759 { 1760 int i; 1761 union u64bit addr64; 1762 1763 for (i = 0; i < sg_used; i++) { 1764 addr64.val32.lower = c->SG[i].Addr.lower; 1765 addr64.val32.upper = c->SG[i].Addr.upper; 1766 pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len, 1767 data_direction); 1768 } 1769 } 1770 1771 static int hpsa_map_one(struct pci_dev *pdev, 1772 struct CommandList *cp, 1773 unsigned char *buf, 1774 size_t buflen, 1775 int data_direction) 1776 { 1777 u64 addr64; 1778 1779 if (buflen == 0 || data_direction == PCI_DMA_NONE) { 1780 cp->Header.SGList = 0; 1781 cp->Header.SGTotal = 0; 1782 return 0; 1783 } 1784 1785 addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction); 1786 if (dma_mapping_error(&pdev->dev, addr64)) { 1787 /* Prevent subsequent unmap of something never mapped */ 1788 cp->Header.SGList = 0; 1789 cp->Header.SGTotal = 0; 1790 return -1; 1791 } 1792 cp->SG[0].Addr.lower = 1793 (u32) (addr64 & (u64) 0x00000000FFFFFFFF); 1794 cp->SG[0].Addr.upper = 1795 (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF); 1796 cp->SG[0].Len = buflen; 1797 cp->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */ 1798 cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */ 1799 cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */ 1800 return 0; 1801 } 1802 1803 static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, 1804 struct CommandList *c) 1805 { 1806 DECLARE_COMPLETION_ONSTACK(wait); 1807 1808 c->waiting = &wait; 1809 enqueue_cmd_and_start_io(h, c); 1810 wait_for_completion(&wait); 1811 } 1812 1813 static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h, 1814 struct CommandList *c) 1815 { 1816 unsigned long flags; 1817 1818 /* If controller lockup detected, fake a hardware error. 
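The command is never handed to hardware that will not respond; it is failed immediately with CMD_HARDWARE_ERR in its err_info, so callers see it as an ordinary command failure.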
*/ 1819 spin_lock_irqsave(&h->lock, flags); 1820 if (unlikely(h->lockup_detected)) { 1821 spin_unlock_irqrestore(&h->lock, flags); 1822 c->err_info->CommandStatus = CMD_HARDWARE_ERR; 1823 } else { 1824 spin_unlock_irqrestore(&h->lock, flags); 1825 hpsa_scsi_do_simple_cmd_core(h, c); 1826 } 1827 } 1828 1829 #define MAX_DRIVER_CMD_RETRIES 25 1830 static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, 1831 struct CommandList *c, int data_direction) 1832 { 1833 int backoff_time = 10, retry_count = 0; 1834 1835 do { 1836 memset(c->err_info, 0, sizeof(*c->err_info)); 1837 hpsa_scsi_do_simple_cmd_core(h, c); 1838 retry_count++; 1839 if (retry_count > 3) { 1840 msleep(backoff_time); 1841 if (backoff_time < 1000) 1842 backoff_time *= 2; 1843 } 1844 } while ((check_for_unit_attention(h, c) || 1845 check_for_busy(h, c)) && 1846 retry_count <= MAX_DRIVER_CMD_RETRIES); 1847 hpsa_pci_unmap(h->pdev, c, 1, data_direction); 1848 } 1849 1850 static void hpsa_scsi_interpret_error(struct CommandList *cp) 1851 { 1852 struct ErrorInfo *ei; 1853 struct device *d = &cp->h->pdev->dev; 1854 1855 ei = cp->err_info; 1856 switch (ei->CommandStatus) { 1857 case CMD_TARGET_STATUS: 1858 dev_warn(d, "cmd %p has completed with errors\n", cp); 1859 dev_warn(d, "cmd %p has SCSI Status = %x\n", cp, 1860 ei->ScsiStatus); 1861 if (ei->ScsiStatus == 0) 1862 dev_warn(d, "SCSI status is abnormally zero. " 1863 "(probably indicates selection timeout " 1864 "reported incorrectly due to a known " 1865 "firmware bug, circa July, 2001.)\n"); 1866 break; 1867 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ 1868 dev_info(d, "UNDERRUN\n"); 1869 break; 1870 case CMD_DATA_OVERRUN: 1871 dev_warn(d, "cp %p has completed with data overrun\n", cp); 1872 break; 1873 case CMD_INVALID: { 1874 /* controller unfortunately reports SCSI passthru's 1875 * to non-existent targets as invalid commands. 1876 */ 1877 dev_warn(d, "cp %p is reported invalid (probably means " 1878 "target device no longer present)\n", cp); 1879 /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0); 1880 print_cmd(cp); */ 1881 } 1882 break; 1883 case CMD_PROTOCOL_ERR: 1884 dev_warn(d, "cp %p has protocol error \n", cp); 1885 break; 1886 case CMD_HARDWARE_ERR: 1887 /* cmd->result = DID_ERROR << 16; */ 1888 dev_warn(d, "cp %p had hardware error\n", cp); 1889 break; 1890 case CMD_CONNECTION_LOST: 1891 dev_warn(d, "cp %p had connection lost\n", cp); 1892 break; 1893 case CMD_ABORTED: 1894 dev_warn(d, "cp %p was aborted\n", cp); 1895 break; 1896 case CMD_ABORT_FAILED: 1897 dev_warn(d, "cp %p reports abort failed\n", cp); 1898 break; 1899 case CMD_UNSOLICITED_ABORT: 1900 dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp); 1901 break; 1902 case CMD_TIMEOUT: 1903 dev_warn(d, "cp %p timed out\n", cp); 1904 break; 1905 case CMD_UNABORTABLE: 1906 dev_warn(d, "Command unabortable\n"); 1907 break; 1908 default: 1909 dev_warn(d, "cp %p returned unknown status %x\n", cp, 1910 ei->CommandStatus); 1911 } 1912 } 1913 1914 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, 1915 u16 page, unsigned char *buf, 1916 unsigned char bufsize) 1917 { 1918 int rc = IO_OK; 1919 struct CommandList *c; 1920 struct ErrorInfo *ei; 1921 1922 c = cmd_special_alloc(h); 1923 1924 if (c == NULL) { /* trouble... 
*/ 1925 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 1926 return -ENOMEM; 1927 } 1928 1929 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, 1930 page, scsi3addr, TYPE_CMD)) { 1931 rc = -1; 1932 goto out; 1933 } 1934 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 1935 ei = c->err_info; 1936 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 1937 hpsa_scsi_interpret_error(c); 1938 rc = -1; 1939 } 1940 out: 1941 cmd_special_free(h, c); 1942 return rc; 1943 } 1944 1945 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, 1946 u8 reset_type) 1947 { 1948 int rc = IO_OK; 1949 struct CommandList *c; 1950 struct ErrorInfo *ei; 1951 1952 c = cmd_special_alloc(h); 1953 1954 if (c == NULL) { /* trouble... */ 1955 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 1956 return -ENOMEM; 1957 } 1958 1959 /* fill_cmd can't fail here, no data buffer to map. */ 1960 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, 1961 scsi3addr, TYPE_MSG); 1962 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */ 1963 hpsa_scsi_do_simple_cmd_core(h, c); 1964 /* no unmap needed here because no data xfer. */ 1965 1966 ei = c->err_info; 1967 if (ei->CommandStatus != 0) { 1968 hpsa_scsi_interpret_error(c); 1969 rc = -1; 1970 } 1971 cmd_special_free(h, c); 1972 return rc; 1973 } 1974 1975 static void hpsa_get_raid_level(struct ctlr_info *h, 1976 unsigned char *scsi3addr, unsigned char *raid_level) 1977 { 1978 int rc; 1979 unsigned char *buf; 1980 1981 *raid_level = RAID_UNKNOWN; 1982 buf = kzalloc(64, GFP_KERNEL); 1983 if (!buf) 1984 return; 1985 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64); 1986 if (rc == 0) 1987 *raid_level = buf[8]; 1988 if (*raid_level > RAID_UNKNOWN) 1989 *raid_level = RAID_UNKNOWN; 1990 kfree(buf); 1991 return; 1992 } 1993 1994 #define HPSA_MAP_DEBUG 1995 #ifdef HPSA_MAP_DEBUG 1996 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc, 1997 struct raid_map_data *map_buff) 1998 { 1999 struct raid_map_disk_data *dd = &map_buff->data[0]; 2000 int map, row, col; 2001 u16 map_cnt, row_cnt, disks_per_row; 2002 2003 if (rc != 0) 2004 return; 2005 2006 dev_info(&h->pdev->dev, "structure_size = %u\n", 2007 le32_to_cpu(map_buff->structure_size)); 2008 dev_info(&h->pdev->dev, "volume_blk_size = %u\n", 2009 le32_to_cpu(map_buff->volume_blk_size)); 2010 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n", 2011 le64_to_cpu(map_buff->volume_blk_cnt)); 2012 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n", 2013 map_buff->phys_blk_shift); 2014 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n", 2015 map_buff->parity_rotation_shift); 2016 dev_info(&h->pdev->dev, "strip_size = %u\n", 2017 le16_to_cpu(map_buff->strip_size)); 2018 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n", 2019 le64_to_cpu(map_buff->disk_starting_blk)); 2020 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n", 2021 le64_to_cpu(map_buff->disk_blk_cnt)); 2022 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n", 2023 le16_to_cpu(map_buff->data_disks_per_row)); 2024 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n", 2025 le16_to_cpu(map_buff->metadata_disks_per_row)); 2026 dev_info(&h->pdev->dev, "row_cnt = %u\n", 2027 le16_to_cpu(map_buff->row_cnt)); 2028 dev_info(&h->pdev->dev, "layout_map_count = %u\n", 2029 le16_to_cpu(map_buff->layout_map_count)); 2030 2031 map_cnt = le16_to_cpu(map_buff->layout_map_count); 2032 for (map = 0; map < map_cnt; map++) { 2033 dev_info(&h->pdev->dev, "Map%u:\n", map); 2034 
row_cnt = le16_to_cpu(map_buff->row_cnt); 2035 for (row = 0; row < row_cnt; row++) { 2036 dev_info(&h->pdev->dev, " Row%u:\n", row); 2037 disks_per_row = 2038 le16_to_cpu(map_buff->data_disks_per_row); 2039 for (col = 0; col < disks_per_row; col++, dd++) 2040 dev_info(&h->pdev->dev, 2041 " D%02u: h=0x%04x xor=%u,%u\n", 2042 col, dd->ioaccel_handle, 2043 dd->xor_mult[0], dd->xor_mult[1]); 2044 disks_per_row = 2045 le16_to_cpu(map_buff->metadata_disks_per_row); 2046 for (col = 0; col < disks_per_row; col++, dd++) 2047 dev_info(&h->pdev->dev, 2048 " M%02u: h=0x%04x xor=%u,%u\n", 2049 col, dd->ioaccel_handle, 2050 dd->xor_mult[0], dd->xor_mult[1]); 2051 } 2052 } 2053 } 2054 #else 2055 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h, 2056 __attribute__((unused)) int rc, 2057 __attribute__((unused)) struct raid_map_data *map_buff) 2058 { 2059 } 2060 #endif 2061 2062 static int hpsa_get_raid_map(struct ctlr_info *h, 2063 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) 2064 { 2065 int rc = 0; 2066 struct CommandList *c; 2067 struct ErrorInfo *ei; 2068 2069 c = cmd_special_alloc(h); 2070 if (c == NULL) { 2071 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 2072 return -ENOMEM; 2073 } 2074 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, 2075 sizeof(this_device->raid_map), 0, 2076 scsi3addr, TYPE_CMD)) { 2077 dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n"); 2078 cmd_special_free(h, c); 2079 return -ENOMEM; 2080 } 2081 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 2082 ei = c->err_info; 2083 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 2084 hpsa_scsi_interpret_error(c); 2085 cmd_special_free(h, c); 2086 return -1; 2087 } 2088 cmd_special_free(h, c); 2089 2090 /* @todo in the future, dynamically allocate RAID map memory */ 2091 if (le32_to_cpu(this_device->raid_map.structure_size) > 2092 sizeof(this_device->raid_map)) { 2093 dev_warn(&h->pdev->dev, "RAID map size is too large!\n"); 2094 rc = -1; 2095 } 2096 hpsa_debug_map_buff(h, rc, &this_device->raid_map); 2097 return rc; 2098 } 2099 2100 static int hpsa_vpd_page_supported(struct ctlr_info *h, 2101 unsigned char scsi3addr[], u8 page) 2102 { 2103 int rc; 2104 int i; 2105 int pages; 2106 unsigned char *buf, bufsize; 2107 2108 buf = kzalloc(256, GFP_KERNEL); 2109 if (!buf) 2110 return 0; 2111 2112 /* Get the size of the page list first */ 2113 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 2114 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, 2115 buf, HPSA_VPD_HEADER_SZ); 2116 if (rc != 0) 2117 goto exit_unsupported; 2118 pages = buf[3]; 2119 if ((pages + HPSA_VPD_HEADER_SZ) <= 255) 2120 bufsize = pages + HPSA_VPD_HEADER_SZ; 2121 else 2122 bufsize = 255; 2123 2124 /* Get the whole VPD page list */ 2125 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 2126 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, 2127 buf, bufsize); 2128 if (rc != 0) 2129 goto exit_unsupported; 2130 2131 pages = buf[3]; 2132 for (i = 1; i <= pages; i++) 2133 if (buf[3 + i] == page) 2134 goto exit_supported; 2135 exit_unsupported: 2136 kfree(buf); 2137 return 0; 2138 exit_supported: 2139 kfree(buf); 2140 return 1; 2141 } 2142 2143 static void hpsa_get_ioaccel_status(struct ctlr_info *h, 2144 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) 2145 { 2146 int rc; 2147 unsigned char *buf; 2148 u8 ioaccel_status; 2149 2150 this_device->offload_config = 0; 2151 this_device->offload_enabled = 0; 2152 2153 buf = kzalloc(64, GFP_KERNEL); 2154 if (!buf) 2155 return; 2156 if 
(!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS)) 2157 goto out; 2158 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 2159 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64); 2160 if (rc != 0) 2161 goto out; 2162 2163 #define IOACCEL_STATUS_BYTE 4 2164 #define OFFLOAD_CONFIGURED_BIT 0x01 2165 #define OFFLOAD_ENABLED_BIT 0x02 2166 ioaccel_status = buf[IOACCEL_STATUS_BYTE]; 2167 this_device->offload_config = 2168 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); 2169 if (this_device->offload_config) { 2170 this_device->offload_enabled = 2171 !!(ioaccel_status & OFFLOAD_ENABLED_BIT); 2172 if (hpsa_get_raid_map(h, scsi3addr, this_device)) 2173 this_device->offload_enabled = 0; 2174 } 2175 out: 2176 kfree(buf); 2177 return; 2178 } 2179 2180 /* Get the device id from inquiry page 0x83 */ 2181 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr, 2182 unsigned char *device_id, int buflen) 2183 { 2184 int rc; 2185 unsigned char *buf; 2186 2187 if (buflen > 16) 2188 buflen = 16; 2189 buf = kzalloc(64, GFP_KERNEL); 2190 if (!buf) 2191 return -1; 2192 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64); 2193 if (rc == 0) 2194 memcpy(device_id, &buf[8], buflen); 2195 kfree(buf); 2196 return rc != 0; 2197 } 2198 2199 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, 2200 struct ReportLUNdata *buf, int bufsize, 2201 int extended_response) 2202 { 2203 int rc = IO_OK; 2204 struct CommandList *c; 2205 unsigned char scsi3addr[8]; 2206 struct ErrorInfo *ei; 2207 2208 c = cmd_special_alloc(h); 2209 if (c == NULL) { /* trouble... */ 2210 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 2211 return -1; 2212 } 2213 /* address the controller */ 2214 memset(scsi3addr, 0, sizeof(scsi3addr)); 2215 if (fill_cmd(c, logical ? 
HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, 2216 buf, bufsize, 0, scsi3addr, TYPE_CMD)) { 2217 rc = -1; 2218 goto out; 2219 } 2220 if (extended_response) 2221 c->Request.CDB[1] = extended_response; 2222 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 2223 ei = c->err_info; 2224 if (ei->CommandStatus != 0 && 2225 ei->CommandStatus != CMD_DATA_UNDERRUN) { 2226 hpsa_scsi_interpret_error(c); 2227 rc = -1; 2228 } else { 2229 if (buf->extended_response_flag != extended_response) { 2230 dev_err(&h->pdev->dev, 2231 "report luns requested format %u, got %u\n", 2232 extended_response, 2233 buf->extended_response_flag); 2234 rc = -1; 2235 } 2236 } 2237 out: 2238 cmd_special_free(h, c); 2239 return rc; 2240 } 2241 2242 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, 2243 struct ReportLUNdata *buf, 2244 int bufsize, int extended_response) 2245 { 2246 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response); 2247 } 2248 2249 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h, 2250 struct ReportLUNdata *buf, int bufsize) 2251 { 2252 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0); 2253 } 2254 2255 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device, 2256 int bus, int target, int lun) 2257 { 2258 device->bus = bus; 2259 device->target = target; 2260 device->lun = lun; 2261 } 2262 2263 static int hpsa_update_device_info(struct ctlr_info *h, 2264 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, 2265 unsigned char *is_OBDR_device) 2266 { 2267 2268 #define OBDR_SIG_OFFSET 43 2269 #define OBDR_TAPE_SIG "$DR-10" 2270 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1) 2271 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN) 2272 2273 unsigned char *inq_buff; 2274 unsigned char *obdr_sig; 2275 2276 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); 2277 if (!inq_buff) 2278 goto bail_out; 2279 2280 /* Do an inquiry to the device to see what it is. */ 2281 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, 2282 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { 2283 /* Inquiry failed (msg printed already) */ 2284 dev_err(&h->pdev->dev, 2285 "hpsa_update_device_info: inquiry failed\n"); 2286 goto bail_out; 2287 } 2288 2289 this_device->devtype = (inq_buff[0] & 0x1f); 2290 memcpy(this_device->scsi3addr, scsi3addr, 8); 2291 memcpy(this_device->vendor, &inq_buff[8], 2292 sizeof(this_device->vendor)); 2293 memcpy(this_device->model, &inq_buff[16], 2294 sizeof(this_device->model)); 2295 memset(this_device->device_id, 0, 2296 sizeof(this_device->device_id)); 2297 hpsa_get_device_id(h, scsi3addr, this_device->device_id, 2298 sizeof(this_device->device_id)); 2299 2300 if (this_device->devtype == TYPE_DISK && 2301 is_logical_dev_addr_mode(scsi3addr)) { 2302 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); 2303 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) 2304 hpsa_get_ioaccel_status(h, scsi3addr, this_device); 2305 } else { 2306 this_device->raid_level = RAID_UNKNOWN; 2307 this_device->offload_config = 0; 2308 this_device->offload_enabled = 0; 2309 } 2310 2311 if (is_OBDR_device) { 2312 /* See if this is a One-Button-Disaster-Recovery device 2313 * by looking for "$DR-10" at offset 43 in inquiry data. 
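* Only the six signature bytes are compared (OBDR_SIG_LEN excludes the terminating NUL), and the device must also report itself as TYPE_ROM.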
2314 */ 2315 obdr_sig = &inq_buff[OBDR_SIG_OFFSET]; 2316 *is_OBDR_device = (this_device->devtype == TYPE_ROM && 2317 strncmp(obdr_sig, OBDR_TAPE_SIG, 2318 OBDR_SIG_LEN) == 0); 2319 } 2320 2321 kfree(inq_buff); 2322 return 0; 2323 2324 bail_out: 2325 kfree(inq_buff); 2326 return 1; 2327 } 2328 2329 static unsigned char *ext_target_model[] = { 2330 "MSA2012", 2331 "MSA2024", 2332 "MSA2312", 2333 "MSA2324", 2334 "P2000 G3 SAS", 2335 "MSA 2040 SAS", 2336 NULL, 2337 }; 2338 2339 static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) 2340 { 2341 int i; 2342 2343 for (i = 0; ext_target_model[i]; i++) 2344 if (strncmp(device->model, ext_target_model[i], 2345 strlen(ext_target_model[i])) == 0) 2346 return 1; 2347 return 0; 2348 } 2349 2350 /* Helper function to assign bus, target, lun mapping of devices. 2351 * Puts non-external target logical volumes on bus 0, external target logical 2352 * volumes on bus 1, physical devices on bus 2. and the hba on bus 3. 2353 * Logical drive target and lun are assigned at this time, but 2354 * physical device lun and target assignment are deferred (assigned 2355 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.) 2356 */ 2357 static void figure_bus_target_lun(struct ctlr_info *h, 2358 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device) 2359 { 2360 u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes)); 2361 2362 if (!is_logical_dev_addr_mode(lunaddrbytes)) { 2363 /* physical device, target and lun filled in later */ 2364 if (is_hba_lunid(lunaddrbytes)) 2365 hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff); 2366 else 2367 /* defer target, lun assignment for physical devices */ 2368 hpsa_set_bus_target_lun(device, 2, -1, -1); 2369 return; 2370 } 2371 /* It's a logical device */ 2372 if (is_ext_target(h, device)) { 2373 /* external target way, put logicals on bus 1 2374 * and match target/lun numbers box 2375 * reports, other smart array, bus 0, target 0, match lunid 2376 */ 2377 hpsa_set_bus_target_lun(device, 2378 1, (lunid >> 16) & 0x3fff, lunid & 0x00ff); 2379 return; 2380 } 2381 hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff); 2382 } 2383 2384 /* 2385 * If there is no lun 0 on a target, linux won't find any devices. 2386 * For the external targets (arrays), we have to manually detect the enclosure 2387 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report 2388 * it for some reason. *tmpdevice is the target we're adding, 2389 * this_device is a pointer into the current element of currentsd[] 2390 * that we're building up in update_scsi_devices(), below. 2391 * lunzerobits is a bitmap that tracks which targets already have a 2392 * lun 0 assigned. 2393 * Returns 1 if an enclosure was added, 0 if not. 2394 */ 2395 static int add_ext_target_dev(struct ctlr_info *h, 2396 struct hpsa_scsi_dev_t *tmpdevice, 2397 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes, 2398 unsigned long lunzerobits[], int *n_ext_target_devs) 2399 { 2400 unsigned char scsi3addr[8]; 2401 2402 if (test_bit(tmpdevice->target, lunzerobits)) 2403 return 0; /* There is already a lun 0 on this target. */ 2404 2405 if (!is_logical_dev_addr_mode(lunaddrbytes)) 2406 return 0; /* It's the logical targets that may lack lun 0. */ 2407 2408 if (!is_ext_target(h, tmpdevice)) 2409 return 0; /* Only external target devices have this problem. */ 2410 2411 if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. 
*/ 2412 return 0; 2413 2414 memset(scsi3addr, 0, 8); 2415 scsi3addr[3] = tmpdevice->target; 2416 if (is_hba_lunid(scsi3addr)) 2417 return 0; /* Don't add the RAID controller here. */ 2418 2419 if (is_scsi_rev_5(h)) 2420 return 0; /* p1210m doesn't need to do this. */ 2421 2422 if (*n_ext_target_devs >= MAX_EXT_TARGETS) { 2423 dev_warn(&h->pdev->dev, "Maximum number of external " 2424 "target devices exceeded. Check your hardware " 2425 "configuration."); 2426 return 0; 2427 } 2428 2429 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL)) 2430 return 0; 2431 (*n_ext_target_devs)++; 2432 hpsa_set_bus_target_lun(this_device, 2433 tmpdevice->bus, tmpdevice->target, 0); 2434 set_bit(tmpdevice->target, lunzerobits); 2435 return 1; 2436 } 2437 2438 /* 2439 * Get address of physical disk used for an ioaccel2 mode command: 2440 * 1. Extract ioaccel2 handle from the command. 2441 * 2. Find a matching ioaccel2 handle from list of physical disks. 2442 * 3. Return: 2443 * 1 and set scsi3addr to address of matching physical 2444 * 0 if no matching physical disk was found. 2445 */ 2446 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, 2447 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr) 2448 { 2449 struct ReportExtendedLUNdata *physicals = NULL; 2450 int responsesize = 24; /* size of physical extended response */ 2451 int extended = 2; /* flag forces reporting 'other dev info'. */ 2452 int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize; 2453 u32 nphysicals = 0; /* number of reported physical devs */ 2454 int found = 0; /* found match (1) or not (0) */ 2455 u32 find; /* handle we need to match */ 2456 int i; 2457 struct scsi_cmnd *scmd; /* scsi command within request being aborted */ 2458 struct hpsa_scsi_dev_t *d; /* device of request being aborted */ 2459 struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */ 2460 u32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */ 2461 u32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */ 2462 2463 if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2) 2464 return 0; /* no match */ 2465 2466 /* point to the ioaccel2 device handle */ 2467 c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex]; 2468 if (c2a == NULL) 2469 return 0; /* no match */ 2470 2471 scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd; 2472 if (scmd == NULL) 2473 return 0; /* no match */ 2474 2475 d = scmd->device->hostdata; 2476 if (d == NULL) 2477 return 0; /* no match */ 2478 2479 it_nexus = cpu_to_le32((u32) d->ioaccel_handle); 2480 scsi_nexus = cpu_to_le32((u32) c2a->scsi_nexus); 2481 find = c2a->scsi_nexus; 2482 2483 /* Get the list of physical devices */ 2484 physicals = kzalloc(reportsize, GFP_KERNEL); 2485 if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals, 2486 reportsize, extended)) { 2487 dev_err(&h->pdev->dev, 2488 "Can't lookup %s device handle: report physical LUNs failed.\n", 2489 "HP SSD Smart Path"); 2490 kfree(physicals); 2491 return 0; 2492 } 2493 nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) / 2494 responsesize; 2495 2496 2497 /* find ioaccel2 handle in list of physicals: */ 2498 for (i = 0; i < nphysicals; i++) { 2499 /* handle is in bytes 28-31 of each lun */ 2500 if (memcmp(&((struct ReportExtendedLUNdata *) 2501 physicals)->LUN[i][20], &find, 4) != 0) { 2502 continue; /* didn't match */ 2503 } 2504 found = 1; 2505 memcpy(scsi3addr, &((struct ReportExtendedLUNdata *) 2506 physicals)->LUN[i][0], 8); 2507 break; /* found it */ 2508 } 2509 2510 
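/* Done with the temporary extended-LUN report either way. On success, scsi3addr now holds the address of the physical disk behind the ioaccel2 handle, which the abort path can use to address that disk directly. */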
kfree(physicals); 2511 if (found) 2512 return 1; 2513 else 2514 return 0; 2515 2516 } 2517 /* 2518 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev, 2519 * logdev. The number of luns in physdev and logdev are returned in 2520 * *nphysicals and *nlogicals, respectively. 2521 * Returns 0 on success, -1 otherwise. 2522 */ 2523 static int hpsa_gather_lun_info(struct ctlr_info *h, 2524 int reportlunsize, 2525 struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode, 2526 struct ReportLUNdata *logdev, u32 *nlogicals) 2527 { 2528 int physical_entry_size = 8; 2529 2530 *physical_mode = 0; 2531 2532 /* For I/O accelerator mode we need to read physical device handles */ 2533 if (h->transMethod & CFGTBL_Trans_io_accel1 || 2534 h->transMethod & CFGTBL_Trans_io_accel2) { 2535 *physical_mode = HPSA_REPORT_PHYS_EXTENDED; 2536 physical_entry_size = 24; 2537 } 2538 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 2539 *physical_mode)) { 2540 dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); 2541 return -1; 2542 } 2543 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 2544 physical_entry_size; 2545 if (*nphysicals > HPSA_MAX_PHYS_LUN) { 2546 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded." 2547 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, 2548 *nphysicals - HPSA_MAX_PHYS_LUN); 2549 *nphysicals = HPSA_MAX_PHYS_LUN; 2550 } 2551 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) { 2552 dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); 2553 return -1; 2554 } 2555 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8; 2556 /* Reject Logicals in excess of our max capability. */ 2557 if (*nlogicals > HPSA_MAX_LUN) { 2558 dev_warn(&h->pdev->dev, 2559 "maximum logical LUNs (%d) exceeded. " 2560 "%d LUNs ignored.\n", HPSA_MAX_LUN, 2561 *nlogicals - HPSA_MAX_LUN); 2562 *nlogicals = HPSA_MAX_LUN; 2563 } 2564 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) { 2565 dev_warn(&h->pdev->dev, 2566 "maximum logical + physical LUNs (%d) exceeded. " 2567 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, 2568 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN); 2569 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals; 2570 } 2571 return 0; 2572 } 2573 2574 u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, 2575 int nphysicals, int nlogicals, 2576 struct ReportExtendedLUNdata *physdev_list, 2577 struct ReportLUNdata *logdev_list) 2578 { 2579 /* Helper function, figure out where the LUN ID info is coming from 2580 * given index i, lists of physical and logical devices, where in 2581 * the list the raid controller is supposed to appear (first or last) 2582 */ 2583 2584 int logicals_start = nphysicals + (raid_ctlr_position == 0); 2585 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0); 2586 2587 if (i == raid_ctlr_position) 2588 return RAID_CTLR_LUNID; 2589 2590 if (i < logicals_start) 2591 return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0]; 2592 2593 if (i < last_device) 2594 return &logdev_list->LUN[i - nphysicals - 2595 (raid_ctlr_position == 0)][0]; 2596 BUG(); 2597 return NULL; 2598 } 2599 2600 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) 2601 { 2602 /* the idea here is we could get notified 2603 * that some devices have changed, so we do a report 2604 * physical luns and report logical luns cmd, and adjust 2605 * our list of devices accordingly. 2606 * 2607 * The scsi3addr's of devices won't change so long as the 2608 * adapter is not reset. 
That means we can rescan and 2609 * tell which devices we already know about, vs. new 2610 * devices, vs. disappearing devices. 2611 */ 2612 struct ReportExtendedLUNdata *physdev_list = NULL; 2613 struct ReportLUNdata *logdev_list = NULL; 2614 u32 nphysicals = 0; 2615 u32 nlogicals = 0; 2616 int physical_mode = 0; 2617 u32 ndev_allocated = 0; 2618 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; 2619 int ncurrent = 0; 2620 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24; 2621 int i, n_ext_target_devs, ndevs_to_allocate; 2622 int raid_ctlr_position; 2623 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS); 2624 2625 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); 2626 physdev_list = kzalloc(reportlunsize, GFP_KERNEL); 2627 logdev_list = kzalloc(reportlunsize, GFP_KERNEL); 2628 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); 2629 2630 if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) { 2631 dev_err(&h->pdev->dev, "out of memory\n"); 2632 goto out; 2633 } 2634 memset(lunzerobits, 0, sizeof(lunzerobits)); 2635 2636 if (hpsa_gather_lun_info(h, reportlunsize, 2637 (struct ReportLUNdata *) physdev_list, &nphysicals, 2638 &physical_mode, logdev_list, &nlogicals)) 2639 goto out; 2640 2641 /* We might see up to the maximum number of logical and physical disks 2642 * plus external target devices, and a device for the local RAID 2643 * controller. 2644 */ 2645 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1; 2646 2647 /* Allocate the per device structures */ 2648 for (i = 0; i < ndevs_to_allocate; i++) { 2649 if (i >= HPSA_MAX_DEVICES) { 2650 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded." 2651 " %d devices ignored.\n", HPSA_MAX_DEVICES, 2652 ndevs_to_allocate - HPSA_MAX_DEVICES); 2653 break; 2654 } 2655 2656 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL); 2657 if (!currentsd[i]) { 2658 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n", 2659 __FILE__, __LINE__); 2660 goto out; 2661 } 2662 ndev_allocated++; 2663 } 2664 2665 if (unlikely(is_scsi_rev_5(h))) 2666 raid_ctlr_position = 0; 2667 else 2668 raid_ctlr_position = nphysicals + nlogicals; 2669 2670 /* adjust our table of devices */ 2671 n_ext_target_devs = 0; 2672 for (i = 0; i < nphysicals + nlogicals + 1; i++) { 2673 u8 *lunaddrbytes, is_OBDR = 0; 2674 2675 /* Figure out where the LUN ID info is coming from */ 2676 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, 2677 i, nphysicals, nlogicals, physdev_list, logdev_list); 2678 /* skip masked physical devices. */ 2679 if (lunaddrbytes[3] & 0xC0 && 2680 i < nphysicals + (raid_ctlr_position == 0)) 2681 continue; 2682 2683 /* Get device type, vendor, model, device id */ 2684 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice, 2685 &is_OBDR)) 2686 continue; /* skip it if we can't talk to it. */ 2687 figure_bus_target_lun(h, lunaddrbytes, tmpdevice); 2688 this_device = currentsd[ncurrent]; 2689 2690 /* 2691 * For external target devices, we have to insert a LUN 0 which 2692 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there 2693 * is nonetheless an enclosure device there. We have to 2694 * present that otherwise linux won't find anything if 2695 * there is no lun 0. 
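* add_ext_target_dev() above fakes in that enclosure entry at lun 0; when it does, ncurrent is bumped so the extra slot is kept.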
2696 */ 2697 if (add_ext_target_dev(h, tmpdevice, this_device, 2698 lunaddrbytes, lunzerobits, 2699 &n_ext_target_devs)) { 2700 ncurrent++; 2701 this_device = currentsd[ncurrent]; 2702 } 2703 2704 *this_device = *tmpdevice; 2705 2706 switch (this_device->devtype) { 2707 case TYPE_ROM: 2708 /* We don't *really* support actual CD-ROM devices, 2709 * just "One Button Disaster Recovery" tape drive 2710 * which temporarily pretends to be a CD-ROM drive. 2711 * So we check that the device is really an OBDR tape 2712 * device by checking for "$DR-10" in bytes 43-48 of 2713 * the inquiry data. 2714 */ 2715 if (is_OBDR) 2716 ncurrent++; 2717 break; 2718 case TYPE_DISK: 2719 if (i >= nphysicals) { 2720 ncurrent++; 2721 break; 2722 } 2723 if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) { 2724 memcpy(&this_device->ioaccel_handle, 2725 &lunaddrbytes[20], 2726 sizeof(this_device->ioaccel_handle)); 2727 ncurrent++; 2728 } 2729 break; 2730 case TYPE_TAPE: 2731 case TYPE_MEDIUM_CHANGER: 2732 ncurrent++; 2733 break; 2734 case TYPE_RAID: 2735 /* Only present the Smartarray HBA as a RAID controller. 2736 * If it's a RAID controller other than the HBA itself 2737 * (an external RAID controller, MSA500 or similar) 2738 * don't present it. 2739 */ 2740 if (!is_hba_lunid(lunaddrbytes)) 2741 break; 2742 ncurrent++; 2743 break; 2744 default: 2745 break; 2746 } 2747 if (ncurrent >= HPSA_MAX_DEVICES) 2748 break; 2749 } 2750 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); 2751 out: 2752 kfree(tmpdevice); 2753 for (i = 0; i < ndev_allocated; i++) 2754 kfree(currentsd[i]); 2755 kfree(currentsd); 2756 kfree(physdev_list); 2757 kfree(logdev_list); 2758 } 2759 2760 /* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci 2761 * dma mapping and fills in the scatter gather entries of the 2762 * hpsa command, cp. 2763 */ 2764 static int hpsa_scatter_gather(struct ctlr_info *h, 2765 struct CommandList *cp, 2766 struct scsi_cmnd *cmd) 2767 { 2768 unsigned int len; 2769 struct scatterlist *sg; 2770 u64 addr64; 2771 int use_sg, i, sg_index, chained; 2772 struct SGDescriptor *curr_sg; 2773 2774 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); 2775 2776 use_sg = scsi_dma_map(cmd); 2777 if (use_sg < 0) 2778 return use_sg; 2779 2780 if (!use_sg) 2781 goto sglist_finished; 2782 2783 curr_sg = cp->SG; 2784 chained = 0; 2785 sg_index = 0; 2786 scsi_for_each_sg(cmd, sg, use_sg, i) { 2787 if (i == h->max_cmd_sg_entries - 1 && 2788 use_sg > h->max_cmd_sg_entries) { 2789 chained = 1; 2790 curr_sg = h->cmd_sg_list[cp->cmdindex]; 2791 sg_index = 0; 2792 } 2793 addr64 = (u64) sg_dma_address(sg); 2794 len = sg_dma_len(sg); 2795 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); 2796 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); 2797 curr_sg->Len = len; 2798 curr_sg->Ext = (i < scsi_sg_count(cmd) - 1) ? 0 : HPSA_SG_LAST; 2799 curr_sg++; 2800 } 2801 2802 if (use_sg + chained > h->maxSG) 2803 h->maxSG = use_sg + chained; 2804 2805 if (chained) { 2806 cp->Header.SGList = h->max_cmd_sg_entries; 2807 cp->Header.SGTotal = (u16) (use_sg + 1); 2808 if (hpsa_map_sg_chain_block(h, cp)) { 2809 scsi_dma_unmap(cmd); 2810 return -1; 2811 } 2812 return 0; 2813 } 2814 2815 sglist_finished: 2816 2817 cp->Header.SGList = (u8) use_sg; /* no. 
SGs contig in this cmd */ 2818 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */ 2819 return 0; 2820 } 2821 2822 #define IO_ACCEL_INELIGIBLE (1) 2823 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len) 2824 { 2825 int is_write = 0; 2826 u32 block; 2827 u32 block_cnt; 2828 2829 /* Perform some CDB fixups if needed using 10 byte reads/writes only */ 2830 switch (cdb[0]) { 2831 case WRITE_6: 2832 case WRITE_12: 2833 is_write = 1; 2834 case READ_6: 2835 case READ_12: 2836 if (*cdb_len == 6) { 2837 block = (((u32) cdb[2]) << 8) | cdb[3]; 2838 block_cnt = cdb[4]; 2839 } else { 2840 BUG_ON(*cdb_len != 12); 2841 block = (((u32) cdb[2]) << 24) | 2842 (((u32) cdb[3]) << 16) | 2843 (((u32) cdb[4]) << 8) | 2844 cdb[5]; 2845 block_cnt = 2846 (((u32) cdb[6]) << 24) | 2847 (((u32) cdb[7]) << 16) | 2848 (((u32) cdb[8]) << 8) | 2849 cdb[9]; 2850 } 2851 if (block_cnt > 0xffff) 2852 return IO_ACCEL_INELIGIBLE; 2853 2854 cdb[0] = is_write ? WRITE_10 : READ_10; 2855 cdb[1] = 0; 2856 cdb[2] = (u8) (block >> 24); 2857 cdb[3] = (u8) (block >> 16); 2858 cdb[4] = (u8) (block >> 8); 2859 cdb[5] = (u8) (block); 2860 cdb[6] = 0; 2861 cdb[7] = (u8) (block_cnt >> 8); 2862 cdb[8] = (u8) (block_cnt); 2863 cdb[9] = 0; 2864 *cdb_len = 10; 2865 break; 2866 } 2867 return 0; 2868 } 2869 2870 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, 2871 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 2872 u8 *scsi3addr) 2873 { 2874 struct scsi_cmnd *cmd = c->scsi_cmd; 2875 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; 2876 unsigned int len; 2877 unsigned int total_len = 0; 2878 struct scatterlist *sg; 2879 u64 addr64; 2880 int use_sg, i; 2881 struct SGDescriptor *curr_sg; 2882 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE; 2883 2884 /* TODO: implement chaining support */ 2885 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) 2886 return IO_ACCEL_INELIGIBLE; 2887 2888 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX); 2889 2890 if (fixup_ioaccel_cdb(cdb, &cdb_len)) 2891 return IO_ACCEL_INELIGIBLE; 2892 2893 c->cmd_type = CMD_IOACCEL1; 2894 2895 /* Adjust the DMA address to point to the accelerated command buffer */ 2896 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle + 2897 (c->cmdindex * sizeof(*cp)); 2898 BUG_ON(c->busaddr & 0x0000007F); 2899 2900 use_sg = scsi_dma_map(cmd); 2901 if (use_sg < 0) 2902 return use_sg; 2903 2904 if (use_sg) { 2905 curr_sg = cp->SG; 2906 scsi_for_each_sg(cmd, sg, use_sg, i) { 2907 addr64 = (u64) sg_dma_address(sg); 2908 len = sg_dma_len(sg); 2909 total_len += len; 2910 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); 2911 curr_sg->Addr.upper = 2912 (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); 2913 curr_sg->Len = len; 2914 2915 if (i == (scsi_sg_count(cmd) - 1)) 2916 curr_sg->Ext = HPSA_SG_LAST; 2917 else 2918 curr_sg->Ext = 0; /* we are not chaining */ 2919 curr_sg++; 2920 } 2921 2922 switch (cmd->sc_data_direction) { 2923 case DMA_TO_DEVICE: 2924 control |= IOACCEL1_CONTROL_DATA_OUT; 2925 break; 2926 case DMA_FROM_DEVICE: 2927 control |= IOACCEL1_CONTROL_DATA_IN; 2928 break; 2929 case DMA_NONE: 2930 control |= IOACCEL1_CONTROL_NODATAXFER; 2931 break; 2932 default: 2933 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 2934 cmd->sc_data_direction); 2935 BUG(); 2936 break; 2937 } 2938 } else { 2939 control |= IOACCEL1_CONTROL_NODATAXFER; 2940 } 2941 2942 c->Header.SGList = use_sg; 2943 /* Fill out the command structure to submit */ 2944 cp->dev_handle = ioaccel_handle & 0xFFFF; 2945 cp->transfer_len = total_len; 2946 cp->io_flags = 
IOACCEL1_IOFLAGS_IO_REQ | 2947 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK); 2948 cp->control = control; 2949 memcpy(cp->CDB, cdb, cdb_len); 2950 memcpy(cp->CISS_LUN, scsi3addr, 8); 2951 /* Tag was already set at init time. */ 2952 enqueue_cmd_and_start_io(h, c); 2953 return 0; 2954 } 2955 2956 /* 2957 * Queue a command directly to a device behind the controller using the 2958 * I/O accelerator path. 2959 */ 2960 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h, 2961 struct CommandList *c) 2962 { 2963 struct scsi_cmnd *cmd = c->scsi_cmd; 2964 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 2965 2966 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, 2967 cmd->cmnd, cmd->cmd_len, dev->scsi3addr); 2968 } 2969 2970 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, 2971 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 2972 u8 *scsi3addr) 2973 { 2974 struct scsi_cmnd *cmd = c->scsi_cmd; 2975 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; 2976 struct ioaccel2_sg_element *curr_sg; 2977 int use_sg, i; 2978 struct scatterlist *sg; 2979 u64 addr64; 2980 u32 len; 2981 u32 total_len = 0; 2982 2983 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) 2984 return IO_ACCEL_INELIGIBLE; 2985 2986 if (fixup_ioaccel_cdb(cdb, &cdb_len)) 2987 return IO_ACCEL_INELIGIBLE; 2988 c->cmd_type = CMD_IOACCEL2; 2989 /* Adjust the DMA address to point to the accelerated command buffer */ 2990 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + 2991 (c->cmdindex * sizeof(*cp)); 2992 BUG_ON(c->busaddr & 0x0000007F); 2993 2994 memset(cp, 0, sizeof(*cp)); 2995 cp->IU_type = IOACCEL2_IU_TYPE; 2996 2997 use_sg = scsi_dma_map(cmd); 2998 if (use_sg < 0) 2999 return use_sg; 3000 3001 if (use_sg) { 3002 BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES); 3003 curr_sg = cp->sg; 3004 scsi_for_each_sg(cmd, sg, use_sg, i) { 3005 addr64 = (u64) sg_dma_address(sg); 3006 len = sg_dma_len(sg); 3007 total_len += len; 3008 curr_sg->address = cpu_to_le64(addr64); 3009 curr_sg->length = cpu_to_le32(len); 3010 curr_sg->reserved[0] = 0; 3011 curr_sg->reserved[1] = 0; 3012 curr_sg->reserved[2] = 0; 3013 curr_sg->chain_indicator = 0; 3014 curr_sg++; 3015 } 3016 3017 switch (cmd->sc_data_direction) { 3018 case DMA_TO_DEVICE: 3019 cp->direction = IOACCEL2_DIR_DATA_OUT; 3020 break; 3021 case DMA_FROM_DEVICE: 3022 cp->direction = IOACCEL2_DIR_DATA_IN; 3023 break; 3024 case DMA_NONE: 3025 cp->direction = IOACCEL2_DIR_NO_DATA; 3026 break; 3027 default: 3028 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 3029 cmd->sc_data_direction); 3030 BUG(); 3031 break; 3032 } 3033 } else { 3034 cp->direction = IOACCEL2_DIR_NO_DATA; 3035 } 3036 cp->scsi_nexus = ioaccel_handle; 3037 cp->Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT) | 3038 DIRECT_LOOKUP_BIT; 3039 memcpy(cp->cdb, cdb, sizeof(cp->cdb)); 3040 memset(cp->cciss_lun, 0, sizeof(cp->cciss_lun)); 3041 cp->cmd_priority_task_attr = 0; 3042 3043 /* fill in sg elements */ 3044 cp->sg_count = (u8) use_sg; 3045 3046 cp->data_len = cpu_to_le32(total_len); 3047 cp->err_ptr = cpu_to_le64(c->busaddr + 3048 offsetof(struct io_accel2_cmd, error_data)); 3049 cp->err_len = cpu_to_le32((u32) sizeof(cp->error_data)); 3050 3051 enqueue_cmd_and_start_io(h, c); 3052 return 0; 3053 } 3054 3055 /* 3056 * Queue a command to the correct I/O accelerator path. 
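* The ioaccel1 vs. ioaccel2 request format is chosen from h->transMethod (CFGTBL_Trans_io_accel1 or CFGTBL_Trans_io_accel2), i.e. from whichever transport mode the controller was brought up in.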
3057 */ 3058 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, 3059 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 3060 u8 *scsi3addr) 3061 { 3062 if (h->transMethod & CFGTBL_Trans_io_accel1) 3063 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle, 3064 cdb, cdb_len, scsi3addr); 3065 else 3066 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle, 3067 cdb, cdb_len, scsi3addr); 3068 } 3069 3070 static void raid_map_helper(struct raid_map_data *map, 3071 int offload_to_mirror, u32 *map_index, u32 *current_group) 3072 { 3073 if (offload_to_mirror == 0) { 3074 /* use physical disk in the first mirrored group. */ 3075 *map_index %= map->data_disks_per_row; 3076 return; 3077 } 3078 do { 3079 /* determine mirror group that *map_index indicates */ 3080 *current_group = *map_index / map->data_disks_per_row; 3081 if (offload_to_mirror == *current_group) 3082 continue; 3083 if (*current_group < (map->layout_map_count - 1)) { 3084 /* select map index from next group */ 3085 *map_index += map->data_disks_per_row; 3086 (*current_group)++; 3087 } else { 3088 /* select map index from first group */ 3089 *map_index %= map->data_disks_per_row; 3090 *current_group = 0; 3091 } 3092 } while (offload_to_mirror != *current_group); 3093 } 3094 3095 /* 3096 * Attempt to perform offload RAID mapping for a logical volume I/O. 3097 */ 3098 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, 3099 struct CommandList *c) 3100 { 3101 struct scsi_cmnd *cmd = c->scsi_cmd; 3102 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 3103 struct raid_map_data *map = &dev->raid_map; 3104 struct raid_map_disk_data *dd = &map->data[0]; 3105 int is_write = 0; 3106 u32 map_index; 3107 u64 first_block, last_block; 3108 u32 block_cnt; 3109 u32 blocks_per_row; 3110 u64 first_row, last_row; 3111 u32 first_row_offset, last_row_offset; 3112 u32 first_column, last_column; 3113 u64 r0_first_row, r0_last_row; 3114 u32 r5or6_blocks_per_row; 3115 u64 r5or6_first_row, r5or6_last_row; 3116 u32 r5or6_first_row_offset, r5or6_last_row_offset; 3117 u32 r5or6_first_column, r5or6_last_column; 3118 u32 total_disks_per_row; 3119 u32 stripesize; 3120 u32 first_group, last_group, current_group; 3121 u32 map_row; 3122 u32 disk_handle; 3123 u64 disk_block; 3124 u32 disk_block_cnt; 3125 u8 cdb[16]; 3126 u8 cdb_len; 3127 #if BITS_PER_LONG == 32 3128 u64 tmpdiv; 3129 #endif 3130 int offload_to_mirror; 3131 3132 BUG_ON(!(dev->offload_config && dev->offload_enabled)); 3133 3134 /* check for valid opcode, get LBA and block count */ 3135 switch (cmd->cmnd[0]) { 3136 case WRITE_6: 3137 is_write = 1; 3138 case READ_6: 3139 first_block = 3140 (((u64) cmd->cmnd[2]) << 8) | 3141 cmd->cmnd[3]; 3142 block_cnt = cmd->cmnd[4]; 3143 break; 3144 case WRITE_10: 3145 is_write = 1; 3146 case READ_10: 3147 first_block = 3148 (((u64) cmd->cmnd[2]) << 24) | 3149 (((u64) cmd->cmnd[3]) << 16) | 3150 (((u64) cmd->cmnd[4]) << 8) | 3151 cmd->cmnd[5]; 3152 block_cnt = 3153 (((u32) cmd->cmnd[7]) << 8) | 3154 cmd->cmnd[8]; 3155 break; 3156 case WRITE_12: 3157 is_write = 1; 3158 case READ_12: 3159 first_block = 3160 (((u64) cmd->cmnd[2]) << 24) | 3161 (((u64) cmd->cmnd[3]) << 16) | 3162 (((u64) cmd->cmnd[4]) << 8) | 3163 cmd->cmnd[5]; 3164 block_cnt = 3165 (((u32) cmd->cmnd[6]) << 24) | 3166 (((u32) cmd->cmnd[7]) << 16) | 3167 (((u32) cmd->cmnd[8]) << 8) | 3168 cmd->cmnd[9]; 3169 break; 3170 case WRITE_16: 3171 is_write = 1; 3172 case READ_16: 3173 first_block = 3174 (((u64) cmd->cmnd[2]) << 56) | 3175 (((u64) cmd->cmnd[3]) << 48) | 
3176 (((u64) cmd->cmnd[4]) << 40) | 3177 (((u64) cmd->cmnd[5]) << 32) | 3178 (((u64) cmd->cmnd[6]) << 24) | 3179 (((u64) cmd->cmnd[7]) << 16) | 3180 (((u64) cmd->cmnd[8]) << 8) | 3181 cmd->cmnd[9]; 3182 block_cnt = 3183 (((u32) cmd->cmnd[10]) << 24) | 3184 (((u32) cmd->cmnd[11]) << 16) | 3185 (((u32) cmd->cmnd[12]) << 8) | 3186 cmd->cmnd[13]; 3187 break; 3188 default: 3189 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */ 3190 } 3191 BUG_ON(block_cnt == 0); 3192 last_block = first_block + block_cnt - 1; 3193 3194 /* check for write to non-RAID-0 */ 3195 if (is_write && dev->raid_level != 0) 3196 return IO_ACCEL_INELIGIBLE; 3197 3198 /* check for invalid block or wraparound */ 3199 if (last_block >= map->volume_blk_cnt || last_block < first_block) 3200 return IO_ACCEL_INELIGIBLE; 3201 3202 /* calculate stripe information for the request */ 3203 blocks_per_row = map->data_disks_per_row * map->strip_size; 3204 #if BITS_PER_LONG == 32 3205 tmpdiv = first_block; 3206 (void) do_div(tmpdiv, blocks_per_row); 3207 first_row = tmpdiv; 3208 tmpdiv = last_block; 3209 (void) do_div(tmpdiv, blocks_per_row); 3210 last_row = tmpdiv; 3211 first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); 3212 last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); 3213 tmpdiv = first_row_offset; 3214 (void) do_div(tmpdiv, map->strip_size); 3215 first_column = tmpdiv; 3216 tmpdiv = last_row_offset; 3217 (void) do_div(tmpdiv, map->strip_size); 3218 last_column = tmpdiv; 3219 #else 3220 first_row = first_block / blocks_per_row; 3221 last_row = last_block / blocks_per_row; 3222 first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); 3223 last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); 3224 first_column = first_row_offset / map->strip_size; 3225 last_column = last_row_offset / map->strip_size; 3226 #endif 3227 3228 /* if this isn't a single row/column then give to the controller */ 3229 if ((first_row != last_row) || (first_column != last_column)) 3230 return IO_ACCEL_INELIGIBLE; 3231 3232 /* proceeding with driver mapping */ 3233 total_disks_per_row = map->data_disks_per_row + 3234 map->metadata_disks_per_row; 3235 map_row = ((u32)(first_row >> map->parity_rotation_shift)) % 3236 map->row_cnt; 3237 map_index = (map_row * total_disks_per_row) + first_column; 3238 3239 switch (dev->raid_level) { 3240 case HPSA_RAID_0: 3241 break; /* nothing special to do */ 3242 case HPSA_RAID_1: 3243 /* Handles load balance across RAID 1 members. 3244 * (2-drive R1 and R10 with even # of drives.) 3245 * Appropriate for SSDs, not optimal for HDDs 3246 */ 3247 BUG_ON(map->layout_map_count != 2); 3248 if (dev->offload_to_mirror) 3249 map_index += map->data_disks_per_row; 3250 dev->offload_to_mirror = !dev->offload_to_mirror; 3251 break; 3252 case HPSA_RAID_ADM: 3253 /* Handles N-way mirrors (R1-ADM) 3254 * and R10 with # of drives divisible by 3.) 3255 */ 3256 BUG_ON(map->layout_map_count != 3); 3257 3258 offload_to_mirror = dev->offload_to_mirror; 3259 raid_map_helper(map, offload_to_mirror, 3260 &map_index, ¤t_group); 3261 /* set mirror group to use next time */ 3262 offload_to_mirror = 3263 (offload_to_mirror >= map->layout_map_count - 1) 3264 ? 
0 : offload_to_mirror + 1; 3265 /* FIXME: remove after debug/dev */ 3266 BUG_ON(offload_to_mirror >= map->layout_map_count); 3267 dev_warn(&h->pdev->dev, 3268 "DEBUG: Using physical disk map index %d from mirror group %d\n", 3269 map_index, offload_to_mirror); 3270 dev->offload_to_mirror = offload_to_mirror; 3271 /* Avoid direct use of dev->offload_to_mirror within this 3272 * function since multiple threads might simultaneously 3273 * increment it beyond the range of map->layout_map_count - 1. 3274 */ 3275 break; 3276 case HPSA_RAID_5: 3277 case HPSA_RAID_6: 3278 if (map->layout_map_count <= 1) 3279 break; 3280 3281 /* Verify first and last block are in same RAID group */ 3282 r5or6_blocks_per_row = 3283 map->strip_size * map->data_disks_per_row; 3284 BUG_ON(r5or6_blocks_per_row == 0); 3285 stripesize = r5or6_blocks_per_row * map->layout_map_count; 3286 #if BITS_PER_LONG == 32 3287 tmpdiv = first_block; 3288 first_group = do_div(tmpdiv, stripesize); 3289 tmpdiv = first_group; 3290 (void) do_div(tmpdiv, r5or6_blocks_per_row); 3291 first_group = tmpdiv; 3292 tmpdiv = last_block; 3293 last_group = do_div(tmpdiv, stripesize); 3294 tmpdiv = last_group; 3295 (void) do_div(tmpdiv, r5or6_blocks_per_row); 3296 last_group = tmpdiv; 3297 #else 3298 first_group = (first_block % stripesize) / r5or6_blocks_per_row; 3299 last_group = (last_block % stripesize) / r5or6_blocks_per_row; 3300 #endif 3301 if (first_group != last_group) 3302 return IO_ACCEL_INELIGIBLE; 3303 3304 /* Verify request is in a single row of RAID 5/6 */ 3305 #if BITS_PER_LONG == 32 3306 tmpdiv = first_block; 3307 (void) do_div(tmpdiv, stripesize); 3308 first_row = r5or6_first_row = r0_first_row = tmpdiv; 3309 tmpdiv = last_block; 3310 (void) do_div(tmpdiv, stripesize); 3311 r5or6_last_row = r0_last_row = tmpdiv; 3312 #else 3313 first_row = r5or6_first_row = r0_first_row = 3314 first_block / stripesize; 3315 r5or6_last_row = r0_last_row = last_block / stripesize; 3316 #endif 3317 if (r5or6_first_row != r5or6_last_row) 3318 return IO_ACCEL_INELIGIBLE; 3319 3320 3321 /* Verify request is in a single column */ 3322 #if BITS_PER_LONG == 32 3323 tmpdiv = first_block; 3324 first_row_offset = do_div(tmpdiv, stripesize); 3325 tmpdiv = first_row_offset; 3326 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row); 3327 r5or6_first_row_offset = first_row_offset; 3328 tmpdiv = last_block; 3329 r5or6_last_row_offset = do_div(tmpdiv, stripesize); 3330 tmpdiv = r5or6_last_row_offset; 3331 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); 3332 tmpdiv = r5or6_first_row_offset; 3333 (void) do_div(tmpdiv, map->strip_size); 3334 first_column = r5or6_first_column = tmpdiv; 3335 tmpdiv = r5or6_last_row_offset; 3336 (void) do_div(tmpdiv, map->strip_size); 3337 r5or6_last_column = tmpdiv; 3338 #else 3339 first_row_offset = r5or6_first_row_offset = 3340 (u32)((first_block % stripesize) % 3341 r5or6_blocks_per_row); 3342 3343 r5or6_last_row_offset = 3344 (u32)((last_block % stripesize) % 3345 r5or6_blocks_per_row); 3346 3347 first_column = r5or6_first_column = 3348 r5or6_first_row_offset / map->strip_size; 3349 r5or6_last_column = 3350 r5or6_last_row_offset / map->strip_size; 3351 #endif 3352 if (r5or6_first_column != r5or6_last_column) 3353 return IO_ACCEL_INELIGIBLE; 3354 3355 /* Request is eligible */ 3356 map_row = ((u32)(first_row >> map->parity_rotation_shift)) % 3357 map->row_cnt; 3358 3359 map_index = (first_group * 3360 (map->row_cnt * total_disks_per_row)) + 3361 (map_row * total_disks_per_row) + first_column; 3362 break; 3363
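/* Worked example for the RAID 5/6 case above, with made-up numbers rather than values from any real map: strip_size = 128, data_disks_per_row = 3 and layout_map_count = 2 give r5or6_blocks_per_row = 384 and stripesize = 768. A request for blocks 900-907 then has first_group = last_group = (900 % 768) / 384 = 0, first_row = last_row = 900 / 768 = 1 and first_column = last_column = ((900 % 768) % 384) / 128 = 1, so it lands on a single drive and stays eligible for offload. */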
default: 3364 return IO_ACCEL_INELIGIBLE; 3365 } 3366 3367 disk_handle = dd[map_index].ioaccel_handle; 3368 disk_block = map->disk_starting_blk + (first_row * map->strip_size) + 3369 (first_row_offset - (first_column * map->strip_size)); 3370 disk_block_cnt = block_cnt; 3371 3372 /* handle differing logical/physical block sizes */ 3373 if (map->phys_blk_shift) { 3374 disk_block <<= map->phys_blk_shift; 3375 disk_block_cnt <<= map->phys_blk_shift; 3376 } 3377 BUG_ON(disk_block_cnt > 0xffff); 3378 3379 /* build the new CDB for the physical disk I/O */ 3380 if (disk_block > 0xffffffff) { 3381 cdb[0] = is_write ? WRITE_16 : READ_16; 3382 cdb[1] = 0; 3383 cdb[2] = (u8) (disk_block >> 56); 3384 cdb[3] = (u8) (disk_block >> 48); 3385 cdb[4] = (u8) (disk_block >> 40); 3386 cdb[5] = (u8) (disk_block >> 32); 3387 cdb[6] = (u8) (disk_block >> 24); 3388 cdb[7] = (u8) (disk_block >> 16); 3389 cdb[8] = (u8) (disk_block >> 8); 3390 cdb[9] = (u8) (disk_block); 3391 cdb[10] = (u8) (disk_block_cnt >> 24); 3392 cdb[11] = (u8) (disk_block_cnt >> 16); 3393 cdb[12] = (u8) (disk_block_cnt >> 8); 3394 cdb[13] = (u8) (disk_block_cnt); 3395 cdb[14] = 0; 3396 cdb[15] = 0; 3397 cdb_len = 16; 3398 } else { 3399 cdb[0] = is_write ? WRITE_10 : READ_10; 3400 cdb[1] = 0; 3401 cdb[2] = (u8) (disk_block >> 24); 3402 cdb[3] = (u8) (disk_block >> 16); 3403 cdb[4] = (u8) (disk_block >> 8); 3404 cdb[5] = (u8) (disk_block); 3405 cdb[6] = 0; 3406 cdb[7] = (u8) (disk_block_cnt >> 8); 3407 cdb[8] = (u8) (disk_block_cnt); 3408 cdb[9] = 0; 3409 cdb_len = 10; 3410 } 3411 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len, 3412 dev->scsi3addr); 3413 } 3414 3415 static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, 3416 void (*done)(struct scsi_cmnd *)) 3417 { 3418 struct ctlr_info *h; 3419 struct hpsa_scsi_dev_t *dev; 3420 unsigned char scsi3addr[8]; 3421 struct CommandList *c; 3422 unsigned long flags; 3423 int rc = 0; 3424 3425 /* Get the ptr to our adapter structure out of cmd->host. */ 3426 h = sdev_to_hba(cmd->device); 3427 dev = cmd->device->hostdata; 3428 if (!dev) { 3429 cmd->result = DID_NO_CONNECT << 16; 3430 done(cmd); 3431 return 0; 3432 } 3433 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); 3434 3435 spin_lock_irqsave(&h->lock, flags); 3436 if (unlikely(h->lockup_detected)) { 3437 spin_unlock_irqrestore(&h->lock, flags); 3438 cmd->result = DID_ERROR << 16; 3439 done(cmd); 3440 return 0; 3441 } 3442 spin_unlock_irqrestore(&h->lock, flags); 3443 c = cmd_alloc(h); 3444 if (c == NULL) { /* trouble... */ 3445 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); 3446 return SCSI_MLQUEUE_HOST_BUSY; 3447 } 3448 3449 /* Fill in the command list header */ 3450 3451 cmd->scsi_done = done; /* save this for use by completion code */ 3452 3453 /* save c in case we have to abort it */ 3454 cmd->host_scribble = (unsigned char *) c; 3455 3456 c->cmd_type = CMD_SCSI; 3457 c->scsi_cmd = cmd; 3458 3459 /* Call alternate submit routine for I/O accelerated commands. 3460 * Retries always go down the normal I/O path. 3461 */ 3462 if (likely(cmd->retries == 0 && 3463 cmd->request->cmd_type == REQ_TYPE_FS && 3464 h->acciopath_status)) { 3465 if (dev->offload_enabled) { 3466 rc = hpsa_scsi_ioaccel_raid_map(h, c); 3467 if (rc == 0) 3468 return 0; /* Sent on ioaccel path */ 3469 if (rc < 0) { /* scsi_dma_map failed. 
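Free the command and return SCSI_MLQUEUE_HOST_BUSY so the midlayer requeues the request and retries it later.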
*/ 3470 cmd_free(h, c); 3471 return SCSI_MLQUEUE_HOST_BUSY; 3472 } 3473 } else if (dev->ioaccel_handle) { 3474 rc = hpsa_scsi_ioaccel_direct_map(h, c); 3475 if (rc == 0) 3476 return 0; /* Sent on direct map path */ 3477 if (rc < 0) { /* scsi_dma_map failed. */ 3478 cmd_free(h, c); 3479 return SCSI_MLQUEUE_HOST_BUSY; 3480 } 3481 } 3482 } 3483 3484 c->Header.ReplyQueue = 0; /* unused in simple mode */ 3485 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); 3486 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT); 3487 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT; 3488 3489 /* Fill in the request block... */ 3490 3491 c->Request.Timeout = 0; 3492 memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); 3493 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); 3494 c->Request.CDBLen = cmd->cmd_len; 3495 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); 3496 c->Request.Type.Type = TYPE_CMD; 3497 c->Request.Type.Attribute = ATTR_SIMPLE; 3498 switch (cmd->sc_data_direction) { 3499 case DMA_TO_DEVICE: 3500 c->Request.Type.Direction = XFER_WRITE; 3501 break; 3502 case DMA_FROM_DEVICE: 3503 c->Request.Type.Direction = XFER_READ; 3504 break; 3505 case DMA_NONE: 3506 c->Request.Type.Direction = XFER_NONE; 3507 break; 3508 case DMA_BIDIRECTIONAL: 3509 /* This can happen if a buggy application does a scsi passthru 3510 * and sets both inlen and outlen to non-zero. ( see 3511 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) 3512 */ 3513 3514 c->Request.Type.Direction = XFER_RSVD; 3515 /* This is technically wrong, and hpsa controllers should 3516 * reject it with CMD_INVALID, which is the most correct 3517 * response, but non-fibre backends appear to let it 3518 * slide by, and give the same results as if this field 3519 * were set correctly. Either way is acceptable for 3520 * our purposes here. 3521 */ 3522 3523 break; 3524 3525 default: 3526 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 3527 cmd->sc_data_direction); 3528 BUG(); 3529 break; 3530 } 3531 3532 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ 3533 cmd_free(h, c); 3534 return SCSI_MLQUEUE_HOST_BUSY; 3535 } 3536 enqueue_cmd_and_start_io(h, c); 3537 /* the cmd'll come back via intr handler in complete_scsi_command() */ 3538 return 0; 3539 } 3540 3541 static DEF_SCSI_QCMD(hpsa_scsi_queue_command) 3542 3543 static int do_not_scan_if_controller_locked_up(struct ctlr_info *h) 3544 { 3545 unsigned long flags; 3546 3547 /* 3548 * Don't let rescans be initiated on a controller known 3549 * to be locked up. If the controller locks up *during* 3550 * a rescan, that thread is probably hosed, but at least 3551 * we can prevent new rescan threads from piling up on a 3552 * locked up controller. 3553 */ 3554 spin_lock_irqsave(&h->lock, flags); 3555 if (unlikely(h->lockup_detected)) { 3556 spin_unlock_irqrestore(&h->lock, flags); 3557 spin_lock_irqsave(&h->scan_lock, flags); 3558 h->scan_finished = 1; 3559 wake_up_all(&h->scan_wait_queue); 3560 spin_unlock_irqrestore(&h->scan_lock, flags); 3561 return 1; 3562 } 3563 spin_unlock_irqrestore(&h->lock, flags); 3564 return 0; 3565 } 3566 3567 static void hpsa_scan_start(struct Scsi_Host *sh) 3568 { 3569 struct ctlr_info *h = shost_to_hba(sh); 3570 unsigned long flags; 3571 3572 if (do_not_scan_if_controller_locked_up(h)) 3573 return; 3574 3575 /* wait until any scan already in progress is finished. 
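Scan completion is tracked in h->scan_finished under h->scan_lock and broadcast on h->scan_wait_queue.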
*/ 3576 while (1) { 3577 spin_lock_irqsave(&h->scan_lock, flags); 3578 if (h->scan_finished) 3579 break; 3580 spin_unlock_irqrestore(&h->scan_lock, flags); 3581 wait_event(h->scan_wait_queue, h->scan_finished); 3582 /* Note: We don't need to worry about a race between this 3583 * thread and driver unload because the midlayer will 3584 * have incremented the reference count, so unload won't 3585 * happen if we're in here. 3586 */ 3587 } 3588 h->scan_finished = 0; /* mark scan as in progress */ 3589 spin_unlock_irqrestore(&h->scan_lock, flags); 3590 3591 if (do_not_scan_if_controller_locked_up(h)) 3592 return; 3593 3594 hpsa_update_scsi_devices(h, h->scsi_host->host_no); 3595 3596 spin_lock_irqsave(&h->scan_lock, flags); 3597 h->scan_finished = 1; /* mark scan as finished. */ 3598 wake_up_all(&h->scan_wait_queue); 3599 spin_unlock_irqrestore(&h->scan_lock, flags); 3600 } 3601 3602 static int hpsa_scan_finished(struct Scsi_Host *sh, 3603 unsigned long elapsed_time) 3604 { 3605 struct ctlr_info *h = shost_to_hba(sh); 3606 unsigned long flags; 3607 int finished; 3608 3609 spin_lock_irqsave(&h->scan_lock, flags); 3610 finished = h->scan_finished; 3611 spin_unlock_irqrestore(&h->scan_lock, flags); 3612 return finished; 3613 } 3614 3615 static int hpsa_change_queue_depth(struct scsi_device *sdev, 3616 int qdepth, int reason) 3617 { 3618 struct ctlr_info *h = sdev_to_hba(sdev); 3619 3620 if (reason != SCSI_QDEPTH_DEFAULT) 3621 return -ENOTSUPP; 3622 3623 if (qdepth < 1) 3624 qdepth = 1; 3625 else 3626 if (qdepth > h->nr_cmds) 3627 qdepth = h->nr_cmds; 3628 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 3629 return sdev->queue_depth; 3630 } 3631 3632 static void hpsa_unregister_scsi(struct ctlr_info *h) 3633 { 3634 /* we are being forcibly unloaded, and may not refuse. */ 3635 scsi_remove_host(h->scsi_host); 3636 scsi_host_put(h->scsi_host); 3637 h->scsi_host = NULL; 3638 } 3639 3640 static int hpsa_register_scsi(struct ctlr_info *h) 3641 { 3642 struct Scsi_Host *sh; 3643 int error; 3644 3645 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); 3646 if (sh == NULL) 3647 goto fail; 3648 3649 sh->io_port = 0; 3650 sh->n_io_port = 0; 3651 sh->this_id = -1; 3652 sh->max_channel = 3; 3653 sh->max_cmd_len = MAX_COMMAND_SIZE; 3654 sh->max_lun = HPSA_MAX_LUN; 3655 sh->max_id = HPSA_MAX_LUN; 3656 sh->can_queue = h->nr_cmds; 3657 sh->cmd_per_lun = h->nr_cmds; 3658 sh->sg_tablesize = h->maxsgentries; 3659 h->scsi_host = sh; 3660 sh->hostdata[0] = (unsigned long) h; 3661 sh->irq = h->intr[h->intr_mode]; 3662 sh->unique_id = sh->irq; 3663 error = scsi_add_host(sh, &h->pdev->dev); 3664 if (error) 3665 goto fail_host_put; 3666 scsi_scan_host(sh); 3667 return 0; 3668 3669 fail_host_put: 3670 dev_err(&h->pdev->dev, "%s: scsi_add_host" 3671 " failed for controller %d\n", __func__, h->ctlr); 3672 scsi_host_put(sh); 3673 return error; 3674 fail: 3675 dev_err(&h->pdev->dev, "%s: scsi_host_alloc" 3676 " failed for controller %d\n", __func__, h->ctlr); 3677 return -ENOMEM; 3678 } 3679 3680 static int wait_for_device_to_become_ready(struct ctlr_info *h, 3681 unsigned char lunaddr[]) 3682 { 3683 int rc = 0; 3684 int count = 0; 3685 int waittime = 1; /* seconds */ 3686 struct CommandList *c; 3687 3688 c = cmd_special_alloc(h); 3689 if (!c) { 3690 dev_warn(&h->pdev->dev, "out of memory in " 3691 "wait_for_device_to_become_ready.\n"); 3692 return IO_ERROR; 3693 } 3694 3695 /* Send test unit ready until device ready, or give up. */ 3696 while (count < HPSA_TUR_RETRY_LIMIT) { 3697 3698 /* Wait for a bit. 
do this first, because if we send 3699 * the TUR right away, the reset will just abort it. 3700 */ 3701 msleep(1000 * waittime); 3702 count++; 3703 3704 /* Increase wait time with each try, up to a point. */ 3705 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) 3706 waittime = waittime * 2; 3707 3708 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ 3709 (void) fill_cmd(c, TEST_UNIT_READY, h, 3710 NULL, 0, 0, lunaddr, TYPE_CMD); 3711 hpsa_scsi_do_simple_cmd_core(h, c); 3712 /* no unmap needed here because no data xfer. */ 3713 3714 if (c->err_info->CommandStatus == CMD_SUCCESS) 3715 break; 3716 3717 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 3718 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && 3719 (c->err_info->SenseInfo[2] == NO_SENSE || 3720 c->err_info->SenseInfo[2] == UNIT_ATTENTION)) 3721 break; 3722 3723 dev_warn(&h->pdev->dev, "waiting %d secs " 3724 "for device to become ready.\n", waittime); 3725 rc = 1; /* device not ready. */ 3726 } 3727 3728 if (rc) 3729 dev_warn(&h->pdev->dev, "giving up on device.\n"); 3730 else 3731 dev_warn(&h->pdev->dev, "device is ready.\n"); 3732 3733 cmd_special_free(h, c); 3734 return rc; 3735 } 3736 3737 /* Need at least one of these error handlers to keep ../scsi/hosts.c from 3738 * complaining. Doing a host- or bus-reset can't do anything good here. 3739 */ 3740 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) 3741 { 3742 int rc; 3743 struct ctlr_info *h; 3744 struct hpsa_scsi_dev_t *dev; 3745 3746 /* find the controller to which the command to be aborted was sent */ 3747 h = sdev_to_hba(scsicmd->device); 3748 if (h == NULL) /* paranoia */ 3749 return FAILED; 3750 dev = scsicmd->device->hostdata; 3751 if (!dev) { 3752 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " 3753 "device lookup failed.\n"); 3754 return FAILED; 3755 } 3756 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n", 3757 h->scsi_host->host_no, dev->bus, dev->target, dev->lun); 3758 /* send a reset to the SCSI LUN which the command was sent to */ 3759 rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN); 3760 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) 3761 return SUCCESS; 3762 3763 dev_warn(&h->pdev->dev, "resetting device failed.\n"); 3764 return FAILED; 3765 } 3766 3767 static void swizzle_abort_tag(u8 *tag) 3768 { 3769 u8 original_tag[8]; 3770 3771 memcpy(original_tag, tag, 8); 3772 tag[0] = original_tag[3]; 3773 tag[1] = original_tag[2]; 3774 tag[2] = original_tag[1]; 3775 tag[3] = original_tag[0]; 3776 tag[4] = original_tag[7]; 3777 tag[5] = original_tag[6]; 3778 tag[6] = original_tag[5]; 3779 tag[7] = original_tag[4]; 3780 } 3781 3782 static void hpsa_get_tag(struct ctlr_info *h, 3783 struct CommandList *c, u32 *taglower, u32 *tagupper) 3784 { 3785 if (c->cmd_type == CMD_IOACCEL1) { 3786 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *) 3787 &h->ioaccel_cmd_pool[c->cmdindex]; 3788 *tagupper = cm1->Tag.upper; 3789 *taglower = cm1->Tag.lower; 3790 return; 3791 } 3792 if (c->cmd_type == CMD_IOACCEL2) { 3793 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *) 3794 &h->ioaccel2_cmd_pool[c->cmdindex]; 3795 *tagupper = cm2->Tag.upper; 3796 *taglower = cm2->Tag.lower; 3797 return; 3798 } 3799 *tagupper = c->Header.Tag.upper; 3800 *taglower = c->Header.Tag.lower; 3801 } 3802 3803 3804 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, 3805 struct CommandList *abort, int swizzle) 3806 { 3807 int rc = IO_OK; 3808 struct CommandList *c; 3809 struct ErrorInfo *ei; 3810 
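/* tagupper/taglower are used only to log the tag of the command being aborted. */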
u32 tagupper, taglower; 3811 3812 c = cmd_special_alloc(h); 3813 if (c == NULL) { /* trouble... */ 3814 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 3815 return -ENOMEM; 3816 } 3817 3818 /* fill_cmd can't fail here, no buffer to map */ 3819 (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort, 3820 0, 0, scsi3addr, TYPE_MSG); 3821 if (swizzle) 3822 swizzle_abort_tag(&c->Request.CDB[4]); 3823 hpsa_scsi_do_simple_cmd_core(h, c); 3824 hpsa_get_tag(h, abort, &taglower, &tagupper); 3825 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n", 3826 __func__, tagupper, taglower); 3827 /* no unmap needed here because no data xfer. */ 3828 3829 ei = c->err_info; 3830 switch (ei->CommandStatus) { 3831 case CMD_SUCCESS: 3832 break; 3833 case CMD_UNABORTABLE: /* Very common, don't make noise. */ 3834 rc = -1; 3835 break; 3836 default: 3837 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n", 3838 __func__, tagupper, taglower); 3839 hpsa_scsi_interpret_error(c); 3840 rc = -1; 3841 break; 3842 } 3843 cmd_special_free(h, c); 3844 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__, 3845 abort->Header.Tag.upper, abort->Header.Tag.lower); 3846 return rc; 3847 } 3848 3849 /* 3850 * hpsa_find_cmd_in_queue 3851 * 3852 * Used to determine whether a command (find) is still present 3853 * in queue_head. Optionally excludes the last element of queue_head. 3854 * 3855 * This is used to avoid unnecessary aborts. Commands in h->reqQ have 3856 * not yet been submitted, and so can be aborted by the driver without 3857 * sending an abort to the hardware. 3858 * 3859 * Returns pointer to command if found in queue, NULL otherwise. 3860 */ 3861 static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h, 3862 struct scsi_cmnd *find, struct list_head *queue_head) 3863 { 3864 unsigned long flags; 3865 struct CommandList *c = NULL; /* ptr into cmpQ */ 3866 3867 if (!find) 3868 return 0; 3869 spin_lock_irqsave(&h->lock, flags); 3870 list_for_each_entry(c, queue_head, list) { 3871 if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */ 3872 continue; 3873 if (c->scsi_cmd == find) { 3874 spin_unlock_irqrestore(&h->lock, flags); 3875 return c; 3876 } 3877 } 3878 spin_unlock_irqrestore(&h->lock, flags); 3879 return NULL; 3880 } 3881 3882 static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h, 3883 u8 *tag, struct list_head *queue_head) 3884 { 3885 unsigned long flags; 3886 struct CommandList *c; 3887 3888 spin_lock_irqsave(&h->lock, flags); 3889 list_for_each_entry(c, queue_head, list) { 3890 if (memcmp(&c->Header.Tag, tag, 8) != 0) 3891 continue; 3892 spin_unlock_irqrestore(&h->lock, flags); 3893 return c; 3894 } 3895 spin_unlock_irqrestore(&h->lock, flags); 3896 return NULL; 3897 } 3898 3899 /* ioaccel2 path firmware cannot handle abort task requests. 3900 * Change abort requests to physical target reset, and send to the 3901 * address of the physical disk used for the ioaccel 2 command. 3902 * Return 0 on success (IO_OK) 3903 * -1 on failure 3904 */ 3905 3906 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, 3907 unsigned char *scsi3addr, struct CommandList *abort) 3908 { 3909 int rc = IO_OK; 3910 struct scsi_cmnd *scmd; /* scsi command within request being aborted */ 3911 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */ 3912 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */ 3913 unsigned char *psa = &phys_scsi3addr[0]; 3914 3915 /* Get a pointer to the hpsa logical device. 
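The abort is converted into a reset of the physical disk that serviced the ioaccel2 command, so we need that disk's address rather than the logical volume's.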
*/ 3916 scmd = (struct scsi_cmnd *) abort->scsi_cmd; 3917 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata); 3918 if (dev == NULL) { 3919 dev_warn(&h->pdev->dev, 3920 "Cannot abort: no device pointer for command.\n"); 3921 return -1; /* not abortable */ 3922 } 3923 3924 if (!dev->offload_enabled) { 3925 dev_warn(&h->pdev->dev, 3926 "Can't abort: device is not operating in HP SSD Smart Path mode.\n"); 3927 return -1; /* not abortable */ 3928 } 3929 3930 /* Incoming scsi3addr is logical addr. We need physical disk addr. */ 3931 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) { 3932 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n"); 3933 return -1; /* not abortable */ 3934 } 3935 3936 /* send the reset */ 3937 rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET); 3938 if (rc != 0) { 3939 dev_warn(&h->pdev->dev, 3940 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 3941 psa[0], psa[1], psa[2], psa[3], 3942 psa[4], psa[5], psa[6], psa[7]); 3943 return rc; /* failed to reset */ 3944 } 3945 3946 /* wait for device to recover */ 3947 if (wait_for_device_to_become_ready(h, psa) != 0) { 3948 dev_warn(&h->pdev->dev, 3949 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 3950 psa[0], psa[1], psa[2], psa[3], 3951 psa[4], psa[5], psa[6], psa[7]); 3952 return -1; /* failed to recover */ 3953 } 3954 3955 /* device recovered */ 3956 dev_info(&h->pdev->dev, 3957 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 3958 psa[0], psa[1], psa[2], psa[3], 3959 psa[4], psa[5], psa[6], psa[7]); 3960 3961 return rc; /* success */ 3962 } 3963 3964 /* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to 3965 * tell which kind we're dealing with, so we send the abort both ways. There 3966 * shouldn't be any collisions between swizzled and unswizzled tags due to the 3967 * way we construct our tags but we check anyway in case the assumptions which 3968 * make this true someday become false. 3969 */ 3970 static int hpsa_send_abort_both_ways(struct ctlr_info *h, 3971 unsigned char *scsi3addr, struct CommandList *abort) 3972 { 3973 u8 swizzled_tag[8]; 3974 struct CommandList *c; 3975 int rc = 0, rc2 = 0; 3976 3977 /* ioccelerator mode 2 commands should be aborted via the 3978 * accelerated path, since RAID path is unaware of these commands, 3979 * but underlying firmware can't handle abort TMF. 3980 * Change abort to physical device reset. 3981 */ 3982 if (abort->cmd_type == CMD_IOACCEL2) 3983 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort); 3984 3985 /* we do not expect to find the swizzled tag in our queue, but 3986 * check anyway just to be sure the assumptions which make this 3987 * the case haven't become wrong. 3988 */ 3989 memcpy(swizzled_tag, &abort->Request.CDB[4], 8); 3990 swizzle_abort_tag(swizzled_tag); 3991 c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ); 3992 if (c != NULL) { 3993 dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n"); 3994 return hpsa_send_abort(h, scsi3addr, abort, 0); 3995 } 3996 rc = hpsa_send_abort(h, scsi3addr, abort, 0); 3997 3998 /* if the command is still in our queue, we can't conclude that it was 3999 * aborted (it might have just completed normally) but in any case 4000 * we don't need to try to abort it another way. 
4001 */ 4002 c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ); 4003 if (c) 4004 rc2 = hpsa_send_abort(h, scsi3addr, abort, 1); 4005 return rc && rc2; 4006 } 4007 4008 /* Send an abort for the specified command. 4009 * If the device and controller support it, 4010 * send a task abort request. 4011 */ 4012 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) 4013 { 4014 4015 int i, rc; 4016 struct ctlr_info *h; 4017 struct hpsa_scsi_dev_t *dev; 4018 struct CommandList *abort; /* pointer to command to be aborted */ 4019 struct CommandList *found; 4020 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */ 4021 char msg[256]; /* For debug messaging. */ 4022 int ml = 0; 4023 u32 tagupper, taglower; 4024 4025 /* Find the controller of the command to be aborted */ 4026 h = sdev_to_hba(sc->device); 4027 if (WARN(h == NULL, 4028 "ABORT REQUEST FAILED, Controller lookup failed.\n")) 4029 return FAILED; 4030 4031 /* Check that controller supports some kind of task abort */ 4032 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) && 4033 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) 4034 return FAILED; 4035 4036 memset(msg, 0, sizeof(msg)); 4037 ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%d ", 4038 h->scsi_host->host_no, sc->device->channel, 4039 sc->device->id, sc->device->lun); 4040 4041 /* Find the device of the command to be aborted */ 4042 dev = sc->device->hostdata; 4043 if (!dev) { 4044 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n", 4045 msg); 4046 return FAILED; 4047 } 4048 4049 /* Get SCSI command to be aborted */ 4050 abort = (struct CommandList *) sc->host_scribble; 4051 if (abort == NULL) { 4052 dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n", 4053 msg); 4054 return FAILED; 4055 } 4056 hpsa_get_tag(h, abort, &taglower, &tagupper); 4057 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower); 4058 as = (struct scsi_cmnd *) abort->scsi_cmd; 4059 if (as != NULL) 4060 ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ", 4061 as->cmnd[0], as->serial_number); 4062 dev_dbg(&h->pdev->dev, "%s\n", msg); 4063 dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n", 4064 h->scsi_host->host_no, dev->bus, dev->target, dev->lun); 4065 4066 /* Search reqQ to See if command is queued but not submitted, 4067 * if so, complete the command with aborted status and remove 4068 * it from the reqQ. 4069 */ 4070 found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ); 4071 if (found) { 4072 found->err_info->CommandStatus = CMD_ABORTED; 4073 finish_cmd(found); 4074 dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n", 4075 msg); 4076 return SUCCESS; 4077 } 4078 4079 /* not in reqQ, if also not in cmpQ, must have already completed */ 4080 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ); 4081 if (!found) { 4082 dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n", 4083 msg); 4084 return SUCCESS; 4085 } 4086 4087 /* 4088 * Command is in flight, or possibly already completed 4089 * by the firmware (but not to the scsi mid layer) but we can't 4090 * distinguish which. Send the abort down. 
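 * hpsa_send_abort_both_ways() tries both tag byte orders and, for ioaccel2
 * commands, falls back to a physical target reset.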
4091 */ 4092 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort); 4093 if (rc != 0) { 4094 dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg); 4095 dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n", 4096 h->scsi_host->host_no, 4097 dev->bus, dev->target, dev->lun); 4098 return FAILED; 4099 } 4100 dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg); 4101 4102 /* If the abort(s) above completed and actually aborted the 4103 * command, then the command to be aborted should already be 4104 * completed. If not, wait around a bit more to see if they 4105 * manage to complete normally. 4106 */ 4107 #define ABORT_COMPLETE_WAIT_SECS 30 4108 for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) { 4109 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ); 4110 if (!found) 4111 return SUCCESS; 4112 msleep(100); 4113 } 4114 dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n", 4115 msg, ABORT_COMPLETE_WAIT_SECS); 4116 return FAILED; 4117 } 4118 4119 4120 /* 4121 * For operations that cannot sleep, a command block is allocated at init, 4122 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track 4123 * which ones are free or in use. Lock must be held when calling this. 4124 * cmd_free() is the complement. 4125 */ 4126 static struct CommandList *cmd_alloc(struct ctlr_info *h) 4127 { 4128 struct CommandList *c; 4129 int i; 4130 union u64bit temp64; 4131 dma_addr_t cmd_dma_handle, err_dma_handle; 4132 unsigned long flags; 4133 4134 spin_lock_irqsave(&h->lock, flags); 4135 do { 4136 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); 4137 if (i == h->nr_cmds) { 4138 spin_unlock_irqrestore(&h->lock, flags); 4139 return NULL; 4140 } 4141 } while (test_and_set_bit 4142 (i & (BITS_PER_LONG - 1), 4143 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); 4144 spin_unlock_irqrestore(&h->lock, flags); 4145 4146 c = h->cmd_pool + i; 4147 memset(c, 0, sizeof(*c)); 4148 cmd_dma_handle = h->cmd_pool_dhandle 4149 + i * sizeof(*c); 4150 c->err_info = h->errinfo_pool + i; 4151 memset(c->err_info, 0, sizeof(*c->err_info)); 4152 err_dma_handle = h->errinfo_pool_dhandle 4153 + i * sizeof(*c->err_info); 4154 4155 c->cmdindex = i; 4156 4157 INIT_LIST_HEAD(&c->list); 4158 c->busaddr = (u32) cmd_dma_handle; 4159 temp64.val = (u64) err_dma_handle; 4160 c->ErrDesc.Addr.lower = temp64.val32.lower; 4161 c->ErrDesc.Addr.upper = temp64.val32.upper; 4162 c->ErrDesc.Len = sizeof(*c->err_info); 4163 4164 c->h = h; 4165 return c; 4166 } 4167 4168 /* For operations that can wait for kmalloc to possibly sleep, 4169 * this routine can be called. Lock need not be held to call 4170 * cmd_special_alloc. cmd_special_free() is the complement. 
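 * Unlike cmd_alloc(), nothing here comes from the preallocated command pool;
 * the command and its error info get their own coherent DMA buffers.
 * Typical usage, as an illustrative sketch only (mirrors
 * wait_for_device_to_become_ready()):
 *
 *	c = cmd_special_alloc(h);
 *	if (!c)
 *		return -ENOMEM;
 *	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
 *	hpsa_scsi_do_simple_cmd_core(h, c);
 *	cmd_special_free(h, c);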
4171 */ 4172 static struct CommandList *cmd_special_alloc(struct ctlr_info *h) 4173 { 4174 struct CommandList *c; 4175 union u64bit temp64; 4176 dma_addr_t cmd_dma_handle, err_dma_handle; 4177 4178 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle); 4179 if (c == NULL) 4180 return NULL; 4181 memset(c, 0, sizeof(*c)); 4182 4183 c->cmd_type = CMD_SCSI; 4184 c->cmdindex = -1; 4185 4186 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info), 4187 &err_dma_handle); 4188 4189 if (c->err_info == NULL) { 4190 pci_free_consistent(h->pdev, 4191 sizeof(*c), c, cmd_dma_handle); 4192 return NULL; 4193 } 4194 memset(c->err_info, 0, sizeof(*c->err_info)); 4195 4196 INIT_LIST_HEAD(&c->list); 4197 c->busaddr = (u32) cmd_dma_handle; 4198 temp64.val = (u64) err_dma_handle; 4199 c->ErrDesc.Addr.lower = temp64.val32.lower; 4200 c->ErrDesc.Addr.upper = temp64.val32.upper; 4201 c->ErrDesc.Len = sizeof(*c->err_info); 4202 4203 c->h = h; 4204 return c; 4205 } 4206 4207 static void cmd_free(struct ctlr_info *h, struct CommandList *c) 4208 { 4209 int i; 4210 unsigned long flags; 4211 4212 i = c - h->cmd_pool; 4213 spin_lock_irqsave(&h->lock, flags); 4214 clear_bit(i & (BITS_PER_LONG - 1), 4215 h->cmd_pool_bits + (i / BITS_PER_LONG)); 4216 spin_unlock_irqrestore(&h->lock, flags); 4217 } 4218 4219 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c) 4220 { 4221 union u64bit temp64; 4222 4223 temp64.val32.lower = c->ErrDesc.Addr.lower; 4224 temp64.val32.upper = c->ErrDesc.Addr.upper; 4225 pci_free_consistent(h->pdev, sizeof(*c->err_info), 4226 c->err_info, (dma_addr_t) temp64.val); 4227 pci_free_consistent(h->pdev, sizeof(*c), 4228 c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK)); 4229 } 4230 4231 #ifdef CONFIG_COMPAT 4232 4233 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) 4234 { 4235 IOCTL32_Command_struct __user *arg32 = 4236 (IOCTL32_Command_struct __user *) arg; 4237 IOCTL_Command_struct arg64; 4238 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); 4239 int err; 4240 u32 cp; 4241 4242 memset(&arg64, 0, sizeof(arg64)); 4243 err = 0; 4244 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 4245 sizeof(arg64.LUN_info)); 4246 err |= copy_from_user(&arg64.Request, &arg32->Request, 4247 sizeof(arg64.Request)); 4248 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 4249 sizeof(arg64.error_info)); 4250 err |= get_user(arg64.buf_size, &arg32->buf_size); 4251 err |= get_user(cp, &arg32->buf); 4252 arg64.buf = compat_ptr(cp); 4253 err |= copy_to_user(p, &arg64, sizeof(arg64)); 4254 4255 if (err) 4256 return -EFAULT; 4257 4258 err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p); 4259 if (err) 4260 return err; 4261 err |= copy_in_user(&arg32->error_info, &p->error_info, 4262 sizeof(arg32->error_info)); 4263 if (err) 4264 return -EFAULT; 4265 return err; 4266 } 4267 4268 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, 4269 int cmd, void *arg) 4270 { 4271 BIG_IOCTL32_Command_struct __user *arg32 = 4272 (BIG_IOCTL32_Command_struct __user *) arg; 4273 BIG_IOCTL_Command_struct arg64; 4274 BIG_IOCTL_Command_struct __user *p = 4275 compat_alloc_user_space(sizeof(arg64)); 4276 int err; 4277 u32 cp; 4278 4279 memset(&arg64, 0, sizeof(arg64)); 4280 err = 0; 4281 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 4282 sizeof(arg64.LUN_info)); 4283 err |= copy_from_user(&arg64.Request, &arg32->Request, 4284 sizeof(arg64.Request)); 4285 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 4286 
sizeof(arg64.error_info)); 4287 err |= get_user(arg64.buf_size, &arg32->buf_size); 4288 err |= get_user(arg64.malloc_size, &arg32->malloc_size); 4289 err |= get_user(cp, &arg32->buf); 4290 arg64.buf = compat_ptr(cp); 4291 err |= copy_to_user(p, &arg64, sizeof(arg64)); 4292 4293 if (err) 4294 return -EFAULT; 4295 4296 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p); 4297 if (err) 4298 return err; 4299 err |= copy_in_user(&arg32->error_info, &p->error_info, 4300 sizeof(arg32->error_info)); 4301 if (err) 4302 return -EFAULT; 4303 return err; 4304 } 4305 4306 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg) 4307 { 4308 switch (cmd) { 4309 case CCISS_GETPCIINFO: 4310 case CCISS_GETINTINFO: 4311 case CCISS_SETINTINFO: 4312 case CCISS_GETNODENAME: 4313 case CCISS_SETNODENAME: 4314 case CCISS_GETHEARTBEAT: 4315 case CCISS_GETBUSTYPES: 4316 case CCISS_GETFIRMVER: 4317 case CCISS_GETDRIVVER: 4318 case CCISS_REVALIDVOLS: 4319 case CCISS_DEREGDISK: 4320 case CCISS_REGNEWDISK: 4321 case CCISS_REGNEWD: 4322 case CCISS_RESCANDISK: 4323 case CCISS_GETLUNINFO: 4324 return hpsa_ioctl(dev, cmd, arg); 4325 4326 case CCISS_PASSTHRU32: 4327 return hpsa_ioctl32_passthru(dev, cmd, arg); 4328 case CCISS_BIG_PASSTHRU32: 4329 return hpsa_ioctl32_big_passthru(dev, cmd, arg); 4330 4331 default: 4332 return -ENOIOCTLCMD; 4333 } 4334 } 4335 #endif 4336 4337 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) 4338 { 4339 struct hpsa_pci_info pciinfo; 4340 4341 if (!argp) 4342 return -EINVAL; 4343 pciinfo.domain = pci_domain_nr(h->pdev->bus); 4344 pciinfo.bus = h->pdev->bus->number; 4345 pciinfo.dev_fn = h->pdev->devfn; 4346 pciinfo.board_id = h->board_id; 4347 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo))) 4348 return -EFAULT; 4349 return 0; 4350 } 4351 4352 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) 4353 { 4354 DriverVer_type DriverVer; 4355 unsigned char vmaj, vmin, vsubmin; 4356 int rc; 4357 4358 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu", 4359 &vmaj, &vmin, &vsubmin); 4360 if (rc != 3) { 4361 dev_info(&h->pdev->dev, "driver version string '%s' " 4362 "unrecognized.", HPSA_DRIVER_VERSION); 4363 vmaj = 0; 4364 vmin = 0; 4365 vsubmin = 0; 4366 } 4367 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin; 4368 if (!argp) 4369 return -EINVAL; 4370 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type))) 4371 return -EFAULT; 4372 return 0; 4373 } 4374 4375 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) 4376 { 4377 IOCTL_Command_struct iocommand; 4378 struct CommandList *c; 4379 char *buff = NULL; 4380 union u64bit temp64; 4381 int rc = 0; 4382 4383 if (!argp) 4384 return -EINVAL; 4385 if (!capable(CAP_SYS_RAWIO)) 4386 return -EPERM; 4387 if (copy_from_user(&iocommand, argp, sizeof(iocommand))) 4388 return -EFAULT; 4389 if ((iocommand.buf_size < 1) && 4390 (iocommand.Request.Type.Direction != XFER_NONE)) { 4391 return -EINVAL; 4392 } 4393 if (iocommand.buf_size > 0) { 4394 buff = kmalloc(iocommand.buf_size, GFP_KERNEL); 4395 if (buff == NULL) 4396 return -EFAULT; 4397 if (iocommand.Request.Type.Direction == XFER_WRITE) { 4398 /* Copy the data into the buffer we created */ 4399 if (copy_from_user(buff, iocommand.buf, 4400 iocommand.buf_size)) { 4401 rc = -EFAULT; 4402 goto out_kfree; 4403 } 4404 } else { 4405 memset(buff, 0, iocommand.buf_size); 4406 } 4407 } 4408 c = cmd_special_alloc(h); 4409 if (c == NULL) { 4410 rc = -ENOMEM; 4411 goto out_kfree; 4412 } 4413 /* Fill in the command type */ 4414 c->cmd_type = 
CMD_IOCTL_PEND; 4415 /* Fill in Command Header */ 4416 c->Header.ReplyQueue = 0; /* unused in simple mode */ 4417 if (iocommand.buf_size > 0) { /* buffer to fill */ 4418 c->Header.SGList = 1; 4419 c->Header.SGTotal = 1; 4420 } else { /* no buffers to fill */ 4421 c->Header.SGList = 0; 4422 c->Header.SGTotal = 0; 4423 } 4424 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); 4425 /* use the kernel address the cmd block for tag */ 4426 c->Header.Tag.lower = c->busaddr; 4427 4428 /* Fill in Request block */ 4429 memcpy(&c->Request, &iocommand.Request, 4430 sizeof(c->Request)); 4431 4432 /* Fill in the scatter gather information */ 4433 if (iocommand.buf_size > 0) { 4434 temp64.val = pci_map_single(h->pdev, buff, 4435 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); 4436 if (dma_mapping_error(&h->pdev->dev, temp64.val)) { 4437 c->SG[0].Addr.lower = 0; 4438 c->SG[0].Addr.upper = 0; 4439 c->SG[0].Len = 0; 4440 rc = -ENOMEM; 4441 goto out; 4442 } 4443 c->SG[0].Addr.lower = temp64.val32.lower; 4444 c->SG[0].Addr.upper = temp64.val32.upper; 4445 c->SG[0].Len = iocommand.buf_size; 4446 c->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining*/ 4447 } 4448 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); 4449 if (iocommand.buf_size > 0) 4450 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); 4451 check_ioctl_unit_attention(h, c); 4452 4453 /* Copy the error information out */ 4454 memcpy(&iocommand.error_info, c->err_info, 4455 sizeof(iocommand.error_info)); 4456 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) { 4457 rc = -EFAULT; 4458 goto out; 4459 } 4460 if (iocommand.Request.Type.Direction == XFER_READ && 4461 iocommand.buf_size > 0) { 4462 /* Copy the data out of the buffer we created */ 4463 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { 4464 rc = -EFAULT; 4465 goto out; 4466 } 4467 } 4468 out: 4469 cmd_special_free(h, c); 4470 out_kfree: 4471 kfree(buff); 4472 return rc; 4473 } 4474 4475 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) 4476 { 4477 BIG_IOCTL_Command_struct *ioc; 4478 struct CommandList *c; 4479 unsigned char **buff = NULL; 4480 int *buff_size = NULL; 4481 union u64bit temp64; 4482 BYTE sg_used = 0; 4483 int status = 0; 4484 int i; 4485 u32 left; 4486 u32 sz; 4487 BYTE __user *data_ptr; 4488 4489 if (!argp) 4490 return -EINVAL; 4491 if (!capable(CAP_SYS_RAWIO)) 4492 return -EPERM; 4493 ioc = (BIG_IOCTL_Command_struct *) 4494 kmalloc(sizeof(*ioc), GFP_KERNEL); 4495 if (!ioc) { 4496 status = -ENOMEM; 4497 goto cleanup1; 4498 } 4499 if (copy_from_user(ioc, argp, sizeof(*ioc))) { 4500 status = -EFAULT; 4501 goto cleanup1; 4502 } 4503 if ((ioc->buf_size < 1) && 4504 (ioc->Request.Type.Direction != XFER_NONE)) { 4505 status = -EINVAL; 4506 goto cleanup1; 4507 } 4508 /* Check kmalloc limits using all SGs */ 4509 if (ioc->malloc_size > MAX_KMALLOC_SIZE) { 4510 status = -EINVAL; 4511 goto cleanup1; 4512 } 4513 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) { 4514 status = -EINVAL; 4515 goto cleanup1; 4516 } 4517 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL); 4518 if (!buff) { 4519 status = -ENOMEM; 4520 goto cleanup1; 4521 } 4522 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL); 4523 if (!buff_size) { 4524 status = -ENOMEM; 4525 goto cleanup1; 4526 } 4527 left = ioc->buf_size; 4528 data_ptr = ioc->buf; 4529 while (left) { 4530 sz = (left > ioc->malloc_size) ? 
ioc->malloc_size : left; 4531 buff_size[sg_used] = sz; 4532 buff[sg_used] = kmalloc(sz, GFP_KERNEL); 4533 if (buff[sg_used] == NULL) { 4534 status = -ENOMEM; 4535 goto cleanup1; 4536 } 4537 if (ioc->Request.Type.Direction == XFER_WRITE) { 4538 if (copy_from_user(buff[sg_used], data_ptr, sz)) { 4539 status = -ENOMEM; 4540 goto cleanup1; 4541 } 4542 } else 4543 memset(buff[sg_used], 0, sz); 4544 left -= sz; 4545 data_ptr += sz; 4546 sg_used++; 4547 } 4548 c = cmd_special_alloc(h); 4549 if (c == NULL) { 4550 status = -ENOMEM; 4551 goto cleanup1; 4552 } 4553 c->cmd_type = CMD_IOCTL_PEND; 4554 c->Header.ReplyQueue = 0; 4555 c->Header.SGList = c->Header.SGTotal = sg_used; 4556 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); 4557 c->Header.Tag.lower = c->busaddr; 4558 memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); 4559 if (ioc->buf_size > 0) { 4560 int i; 4561 for (i = 0; i < sg_used; i++) { 4562 temp64.val = pci_map_single(h->pdev, buff[i], 4563 buff_size[i], PCI_DMA_BIDIRECTIONAL); 4564 if (dma_mapping_error(&h->pdev->dev, temp64.val)) { 4565 c->SG[i].Addr.lower = 0; 4566 c->SG[i].Addr.upper = 0; 4567 c->SG[i].Len = 0; 4568 hpsa_pci_unmap(h->pdev, c, i, 4569 PCI_DMA_BIDIRECTIONAL); 4570 status = -ENOMEM; 4571 goto cleanup0; 4572 } 4573 c->SG[i].Addr.lower = temp64.val32.lower; 4574 c->SG[i].Addr.upper = temp64.val32.upper; 4575 c->SG[i].Len = buff_size[i]; 4576 c->SG[i].Ext = i < sg_used - 1 ? 0 : HPSA_SG_LAST; 4577 } 4578 } 4579 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); 4580 if (sg_used) 4581 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); 4582 check_ioctl_unit_attention(h, c); 4583 /* Copy the error information out */ 4584 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); 4585 if (copy_to_user(argp, ioc, sizeof(*ioc))) { 4586 status = -EFAULT; 4587 goto cleanup0; 4588 } 4589 if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) { 4590 /* Copy the data out of the buffer we created */ 4591 BYTE __user *ptr = ioc->buf; 4592 for (i = 0; i < sg_used; i++) { 4593 if (copy_to_user(ptr, buff[i], buff_size[i])) { 4594 status = -EFAULT; 4595 goto cleanup0; 4596 } 4597 ptr += buff_size[i]; 4598 } 4599 } 4600 status = 0; 4601 cleanup0: 4602 cmd_special_free(h, c); 4603 cleanup1: 4604 if (buff) { 4605 for (i = 0; i < sg_used; i++) 4606 kfree(buff[i]); 4607 kfree(buff); 4608 } 4609 kfree(buff_size); 4610 kfree(ioc); 4611 return status; 4612 } 4613 4614 static void check_ioctl_unit_attention(struct ctlr_info *h, 4615 struct CommandList *c) 4616 { 4617 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 4618 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) 4619 (void) check_for_unit_attention(h, c); 4620 } 4621 4622 static int increment_passthru_count(struct ctlr_info *h) 4623 { 4624 unsigned long flags; 4625 4626 spin_lock_irqsave(&h->passthru_count_lock, flags); 4627 if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) { 4628 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 4629 return -1; 4630 } 4631 h->passthru_count++; 4632 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 4633 return 0; 4634 } 4635 4636 static void decrement_passthru_count(struct ctlr_info *h) 4637 { 4638 unsigned long flags; 4639 4640 spin_lock_irqsave(&h->passthru_count_lock, flags); 4641 if (h->passthru_count <= 0) { 4642 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 4643 /* not expecting to get here. 
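A non-positive count here means decrement was called without a matching increment.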
*/ 4644 dev_warn(&h->pdev->dev, "Bug detected, passthru_count seems to be incorrect.\n"); 4645 return; 4646 } 4647 h->passthru_count--; 4648 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 4649 } 4650 4651 /* 4652 * ioctl 4653 */ 4654 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg) 4655 { 4656 struct ctlr_info *h; 4657 void __user *argp = (void __user *)arg; 4658 int rc; 4659 4660 h = sdev_to_hba(dev); 4661 4662 switch (cmd) { 4663 case CCISS_DEREGDISK: 4664 case CCISS_REGNEWDISK: 4665 case CCISS_REGNEWD: 4666 hpsa_scan_start(h->scsi_host); 4667 return 0; 4668 case CCISS_GETPCIINFO: 4669 return hpsa_getpciinfo_ioctl(h, argp); 4670 case CCISS_GETDRIVVER: 4671 return hpsa_getdrivver_ioctl(h, argp); 4672 case CCISS_PASSTHRU: 4673 if (increment_passthru_count(h)) 4674 return -EAGAIN; 4675 rc = hpsa_passthru_ioctl(h, argp); 4676 decrement_passthru_count(h); 4677 return rc; 4678 case CCISS_BIG_PASSTHRU: 4679 if (increment_passthru_count(h)) 4680 return -EAGAIN; 4681 rc = hpsa_big_passthru_ioctl(h, argp); 4682 decrement_passthru_count(h); 4683 return rc; 4684 default: 4685 return -ENOTTY; 4686 } 4687 } 4688 4689 static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, 4690 u8 reset_type) 4691 { 4692 struct CommandList *c; 4693 4694 c = cmd_alloc(h); 4695 if (!c) 4696 return -ENOMEM; 4697 /* fill_cmd can't fail here, no data buffer to map */ 4698 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, 4699 RAID_CTLR_LUNID, TYPE_MSG); 4700 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */ 4701 c->waiting = NULL; 4702 enqueue_cmd_and_start_io(h, c); 4703 /* Don't wait for completion, the reset won't complete. Don't free 4704 * the command either. This is the last command we will send before 4705 * re-initializing everything, so it doesn't matter and won't leak. 4706 */ 4707 return 0; 4708 } 4709 4710 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, 4711 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, 4712 int cmd_type) 4713 { 4714 int pci_dir = XFER_NONE; 4715 struct CommandList *a; /* for commands to be aborted */ 4716 4717 c->cmd_type = CMD_IOCTL_PEND; 4718 c->Header.ReplyQueue = 0; 4719 if (buff != NULL && size > 0) { 4720 c->Header.SGList = 1; 4721 c->Header.SGTotal = 1; 4722 } else { 4723 c->Header.SGList = 0; 4724 c->Header.SGTotal = 0; 4725 } 4726 c->Header.Tag.lower = c->busaddr; 4727 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); 4728 4729 c->Request.Type.Type = cmd_type; 4730 if (cmd_type == TYPE_CMD) { 4731 switch (cmd) { 4732 case HPSA_INQUIRY: 4733 /* are we trying to read a vital product page */ 4734 if (page_code & VPD_PAGE) { 4735 c->Request.CDB[1] = 0x01; 4736 c->Request.CDB[2] = (page_code & 0xff); 4737 } 4738 c->Request.CDBLen = 6; 4739 c->Request.Type.Attribute = ATTR_SIMPLE; 4740 c->Request.Type.Direction = XFER_READ; 4741 c->Request.Timeout = 0; 4742 c->Request.CDB[0] = HPSA_INQUIRY; 4743 c->Request.CDB[4] = size & 0xFF; 4744 break; 4745 case HPSA_REPORT_LOG: 4746 case HPSA_REPORT_PHYS: 4747 /* Talking to controller so It's a physical command 4748 mode = 00 target = 0. Nothing to write. 
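The allocation length goes in CDB bytes 6-9, MSB first, as filled in below.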
4749 */ 4750 c->Request.CDBLen = 12; 4751 c->Request.Type.Attribute = ATTR_SIMPLE; 4752 c->Request.Type.Direction = XFER_READ; 4753 c->Request.Timeout = 0; 4754 c->Request.CDB[0] = cmd; 4755 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ 4756 c->Request.CDB[7] = (size >> 16) & 0xFF; 4757 c->Request.CDB[8] = (size >> 8) & 0xFF; 4758 c->Request.CDB[9] = size & 0xFF; 4759 break; 4760 case HPSA_CACHE_FLUSH: 4761 c->Request.CDBLen = 12; 4762 c->Request.Type.Attribute = ATTR_SIMPLE; 4763 c->Request.Type.Direction = XFER_WRITE; 4764 c->Request.Timeout = 0; 4765 c->Request.CDB[0] = BMIC_WRITE; 4766 c->Request.CDB[6] = BMIC_CACHE_FLUSH; 4767 c->Request.CDB[7] = (size >> 8) & 0xFF; 4768 c->Request.CDB[8] = size & 0xFF; 4769 break; 4770 case TEST_UNIT_READY: 4771 c->Request.CDBLen = 6; 4772 c->Request.Type.Attribute = ATTR_SIMPLE; 4773 c->Request.Type.Direction = XFER_NONE; 4774 c->Request.Timeout = 0; 4775 break; 4776 case HPSA_GET_RAID_MAP: 4777 c->Request.CDBLen = 12; 4778 c->Request.Type.Attribute = ATTR_SIMPLE; 4779 c->Request.Type.Direction = XFER_READ; 4780 c->Request.Timeout = 0; 4781 c->Request.CDB[0] = HPSA_CISS_READ; 4782 c->Request.CDB[1] = cmd; 4783 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ 4784 c->Request.CDB[7] = (size >> 16) & 0xFF; 4785 c->Request.CDB[8] = (size >> 8) & 0xFF; 4786 c->Request.CDB[9] = size & 0xFF; 4787 break; 4788 default: 4789 dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd); 4790 BUG(); 4791 return -1; 4792 } 4793 } else if (cmd_type == TYPE_MSG) { 4794 switch (cmd) { 4795 4796 case HPSA_DEVICE_RESET_MSG: 4797 c->Request.CDBLen = 16; 4798 c->Request.Type.Type = 1; /* It is a MSG not a CMD */ 4799 c->Request.Type.Attribute = ATTR_SIMPLE; 4800 c->Request.Type.Direction = XFER_NONE; 4801 c->Request.Timeout = 0; /* Don't time out */ 4802 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); 4803 c->Request.CDB[0] = cmd; 4804 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN; 4805 /* If bytes 4-7 are zero, it means reset the */ 4806 /* LunID device */ 4807 c->Request.CDB[4] = 0x00; 4808 c->Request.CDB[5] = 0x00; 4809 c->Request.CDB[6] = 0x00; 4810 c->Request.CDB[7] = 0x00; 4811 break; 4812 case HPSA_ABORT_MSG: 4813 a = buff; /* point to command to be aborted */ 4814 dev_dbg(&h->pdev->dev, "Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n", 4815 a->Header.Tag.upper, a->Header.Tag.lower, 4816 c->Header.Tag.upper, c->Header.Tag.lower); 4817 c->Request.CDBLen = 16; 4818 c->Request.Type.Type = TYPE_MSG; 4819 c->Request.Type.Attribute = ATTR_SIMPLE; 4820 c->Request.Type.Direction = XFER_WRITE; 4821 c->Request.Timeout = 0; /* Don't time out */ 4822 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT; 4823 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK; 4824 c->Request.CDB[2] = 0x00; /* reserved */ 4825 c->Request.CDB[3] = 0x00; /* reserved */ 4826 /* Tag to abort goes in CDB[4]-CDB[11] */ 4827 c->Request.CDB[4] = a->Header.Tag.lower & 0xFF; 4828 c->Request.CDB[5] = (a->Header.Tag.lower >> 8) & 0xFF; 4829 c->Request.CDB[6] = (a->Header.Tag.lower >> 16) & 0xFF; 4830 c->Request.CDB[7] = (a->Header.Tag.lower >> 24) & 0xFF; 4831 c->Request.CDB[8] = a->Header.Tag.upper & 0xFF; 4832 c->Request.CDB[9] = (a->Header.Tag.upper >> 8) & 0xFF; 4833 c->Request.CDB[10] = (a->Header.Tag.upper >> 16) & 0xFF; 4834 c->Request.CDB[11] = (a->Header.Tag.upper >> 24) & 0xFF; 4835 c->Request.CDB[12] = 0x00; /* reserved */ 4836 c->Request.CDB[13] = 0x00; /* reserved */ 4837 c->Request.CDB[14] = 0x00; /* reserved */ 4838 c->Request.CDB[15] = 0x00; /* reserved */ 4839 break; 4840 default: 4841 
dev_warn(&h->pdev->dev, "unknown message type %d\n", 4842 cmd); 4843 BUG(); 4844 } 4845 } else { 4846 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); 4847 BUG(); 4848 } 4849 4850 switch (c->Request.Type.Direction) { 4851 case XFER_READ: 4852 pci_dir = PCI_DMA_FROMDEVICE; 4853 break; 4854 case XFER_WRITE: 4855 pci_dir = PCI_DMA_TODEVICE; 4856 break; 4857 case XFER_NONE: 4858 pci_dir = PCI_DMA_NONE; 4859 break; 4860 default: 4861 pci_dir = PCI_DMA_BIDIRECTIONAL; 4862 } 4863 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir)) 4864 return -1; 4865 return 0; 4866 } 4867 4868 /* 4869 * Map (physical) PCI mem into (virtual) kernel space 4870 */ 4871 static void __iomem *remap_pci_mem(ulong base, ulong size) 4872 { 4873 ulong page_base = ((ulong) base) & PAGE_MASK; 4874 ulong page_offs = ((ulong) base) - page_base; 4875 void __iomem *page_remapped = ioremap_nocache(page_base, 4876 page_offs + size); 4877 4878 return page_remapped ? (page_remapped + page_offs) : NULL; 4879 } 4880 4881 /* Takes cmds off the submission queue and sends them to the hardware, 4882 * then puts them on the queue of cmds waiting for completion. 4883 */ 4884 static void start_io(struct ctlr_info *h) 4885 { 4886 struct CommandList *c; 4887 unsigned long flags; 4888 4889 spin_lock_irqsave(&h->lock, flags); 4890 while (!list_empty(&h->reqQ)) { 4891 c = list_entry(h->reqQ.next, struct CommandList, list); 4892 /* can't do anything if fifo is full */ 4893 if ((h->access.fifo_full(h))) { 4894 h->fifo_recently_full = 1; 4895 dev_warn(&h->pdev->dev, "fifo full\n"); 4896 break; 4897 } 4898 h->fifo_recently_full = 0; 4899 4900 /* Get the first entry from the Request Q */ 4901 removeQ(c); 4902 h->Qdepth--; 4903 4904 /* Put job onto the completed Q */ 4905 addQ(&h->cmpQ, c); 4906 4907 /* Must increment commands_outstanding before unlocking 4908 * and submitting to avoid race checking for fifo full 4909 * condition. 4910 */ 4911 h->commands_outstanding++; 4912 if (h->commands_outstanding > h->max_outstanding) 4913 h->max_outstanding = h->commands_outstanding; 4914 4915 /* Tell the controller execute command */ 4916 spin_unlock_irqrestore(&h->lock, flags); 4917 h->access.submit_command(h, c); 4918 spin_lock_irqsave(&h->lock, flags); 4919 } 4920 spin_unlock_irqrestore(&h->lock, flags); 4921 } 4922 4923 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q) 4924 { 4925 return h->access.command_completed(h, q); 4926 } 4927 4928 static inline bool interrupt_pending(struct ctlr_info *h) 4929 { 4930 return h->access.intr_pending(h); 4931 } 4932 4933 static inline long interrupt_not_for_us(struct ctlr_info *h) 4934 { 4935 return (h->access.intr_pending(h) == 0) || 4936 (h->interrupts_enabled == 0); 4937 } 4938 4939 static inline int bad_tag(struct ctlr_info *h, u32 tag_index, 4940 u32 raw_tag) 4941 { 4942 if (unlikely(tag_index >= h->nr_cmds)) { 4943 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); 4944 return 1; 4945 } 4946 return 0; 4947 } 4948 4949 static inline void finish_cmd(struct CommandList *c) 4950 { 4951 unsigned long flags; 4952 int io_may_be_stalled = 0; 4953 struct ctlr_info *h = c->h; 4954 4955 spin_lock_irqsave(&h->lock, flags); 4956 removeQ(c); 4957 4958 /* 4959 * Check for possibly stalled i/o. 4960 * 4961 * If a fifo_full condition is encountered, requests will back up 4962 * in h->reqQ. This queue is only emptied out by start_io which is 4963 * only called when a new i/o request comes in. If no i/o's are 4964 * forthcoming, the i/o's in h->reqQ can get stuck. 
So we call 4965 * start_io from here if we detect such a danger. 4966 * 4967 * Normally, we shouldn't hit this case, but pounding on the 4968 * CCISS_PASSTHRU ioctl can provoke it. Only call start_io if 4969 * commands_outstanding is low. We want to avoid calling 4970 * start_io from in here as much as possible, and esp. don't 4971 * want to get in a cycle where we call start_io every time 4972 * through here. 4973 */ 4974 if (unlikely(h->fifo_recently_full) && 4975 h->commands_outstanding < 5) 4976 io_may_be_stalled = 1; 4977 4978 spin_unlock_irqrestore(&h->lock, flags); 4979 4980 dial_up_lockup_detection_on_fw_flash_complete(c->h, c); 4981 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI 4982 || c->cmd_type == CMD_IOACCEL2)) 4983 complete_scsi_command(c); 4984 else if (c->cmd_type == CMD_IOCTL_PEND) 4985 complete(c->waiting); 4986 if (unlikely(io_may_be_stalled)) 4987 start_io(h); 4988 } 4989 4990 static inline u32 hpsa_tag_contains_index(u32 tag) 4991 { 4992 return tag & DIRECT_LOOKUP_BIT; 4993 } 4994 4995 static inline u32 hpsa_tag_to_index(u32 tag) 4996 { 4997 return tag >> DIRECT_LOOKUP_SHIFT; 4998 } 4999 5000 5001 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag) 5002 { 5003 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1) 5004 #define HPSA_SIMPLE_ERROR_BITS 0x03 5005 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) 5006 return tag & ~HPSA_SIMPLE_ERROR_BITS; 5007 return tag & ~HPSA_PERF_ERROR_BITS; 5008 } 5009 5010 /* process completion of an indexed ("direct lookup") command */ 5011 static inline void process_indexed_cmd(struct ctlr_info *h, 5012 u32 raw_tag) 5013 { 5014 u32 tag_index; 5015 struct CommandList *c; 5016 5017 tag_index = hpsa_tag_to_index(raw_tag); 5018 if (!bad_tag(h, tag_index, raw_tag)) { 5019 c = h->cmd_pool + tag_index; 5020 finish_cmd(c); 5021 } 5022 } 5023 5024 /* process completion of a non-indexed command */ 5025 static inline void process_nonindexed_cmd(struct ctlr_info *h, 5026 u32 raw_tag) 5027 { 5028 u32 tag; 5029 struct CommandList *c = NULL; 5030 unsigned long flags; 5031 5032 tag = hpsa_tag_discard_error_bits(h, raw_tag); 5033 spin_lock_irqsave(&h->lock, flags); 5034 list_for_each_entry(c, &h->cmpQ, list) { 5035 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) { 5036 spin_unlock_irqrestore(&h->lock, flags); 5037 finish_cmd(c); 5038 return; 5039 } 5040 } 5041 spin_unlock_irqrestore(&h->lock, flags); 5042 bad_tag(h, h->nr_cmds + 1, raw_tag); 5043 } 5044 5045 /* Some controllers, like p400, will give us one interrupt 5046 * after a soft reset, even if we turned interrupts off. 5047 * Only need to check for this in the hpsa_xxx_discard_completions 5048 * functions. 5049 */ 5050 static int ignore_bogus_interrupt(struct ctlr_info *h) 5051 { 5052 if (likely(!reset_devices)) 5053 return 0; 5054 5055 if (likely(h->interrupts_enabled)) 5056 return 0; 5057 5058 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " 5059 "(known firmware bug.) Ignoring.\n"); 5060 5061 return 1; 5062 } 5063 5064 /* 5065 * Convert &h->q[x] (passed to interrupt handlers) back to h. 5066 * Relies on (h-q[x] == x) being true for x such that 5067 * 0 <= x < MAX_REPLY_QUEUES. 
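 * Each h->q[x] is initialized to the value x, so subtracting *queue from the
 * queue pointer recovers &h->q[0], from which container_of() yields the
 * enclosing ctlr_info (e.g. queue == &h->q[3] gives queue - 3 == &h->q[0]).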
5068 */ 5069 static struct ctlr_info *queue_to_hba(u8 *queue) 5070 { 5071 return container_of((queue - *queue), struct ctlr_info, q[0]); 5072 } 5073 5074 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue) 5075 { 5076 struct ctlr_info *h = queue_to_hba(queue); 5077 u8 q = *(u8 *) queue; 5078 u32 raw_tag; 5079 5080 if (ignore_bogus_interrupt(h)) 5081 return IRQ_NONE; 5082 5083 if (interrupt_not_for_us(h)) 5084 return IRQ_NONE; 5085 h->last_intr_timestamp = get_jiffies_64(); 5086 while (interrupt_pending(h)) { 5087 raw_tag = get_next_completion(h, q); 5088 while (raw_tag != FIFO_EMPTY) 5089 raw_tag = next_command(h, q); 5090 } 5091 return IRQ_HANDLED; 5092 } 5093 5094 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue) 5095 { 5096 struct ctlr_info *h = queue_to_hba(queue); 5097 u32 raw_tag; 5098 u8 q = *(u8 *) queue; 5099 5100 if (ignore_bogus_interrupt(h)) 5101 return IRQ_NONE; 5102 5103 h->last_intr_timestamp = get_jiffies_64(); 5104 raw_tag = get_next_completion(h, q); 5105 while (raw_tag != FIFO_EMPTY) 5106 raw_tag = next_command(h, q); 5107 return IRQ_HANDLED; 5108 } 5109 5110 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue) 5111 { 5112 struct ctlr_info *h = queue_to_hba((u8 *) queue); 5113 u32 raw_tag; 5114 u8 q = *(u8 *) queue; 5115 5116 if (interrupt_not_for_us(h)) 5117 return IRQ_NONE; 5118 h->last_intr_timestamp = get_jiffies_64(); 5119 while (interrupt_pending(h)) { 5120 raw_tag = get_next_completion(h, q); 5121 while (raw_tag != FIFO_EMPTY) { 5122 if (likely(hpsa_tag_contains_index(raw_tag))) 5123 process_indexed_cmd(h, raw_tag); 5124 else 5125 process_nonindexed_cmd(h, raw_tag); 5126 raw_tag = next_command(h, q); 5127 } 5128 } 5129 return IRQ_HANDLED; 5130 } 5131 5132 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue) 5133 { 5134 struct ctlr_info *h = queue_to_hba(queue); 5135 u32 raw_tag; 5136 u8 q = *(u8 *) queue; 5137 5138 h->last_intr_timestamp = get_jiffies_64(); 5139 raw_tag = get_next_completion(h, q); 5140 while (raw_tag != FIFO_EMPTY) { 5141 if (likely(hpsa_tag_contains_index(raw_tag))) 5142 process_indexed_cmd(h, raw_tag); 5143 else 5144 process_nonindexed_cmd(h, raw_tag); 5145 raw_tag = next_command(h, q); 5146 } 5147 return IRQ_HANDLED; 5148 } 5149 5150 /* Send a message CDB to the firmware. Careful, this only works 5151 * in simple mode, not performant mode due to the tag lookup. 5152 * We only ever use this immediately after a controller reset. 5153 */ 5154 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, 5155 unsigned char type) 5156 { 5157 struct Command { 5158 struct CommandListHeader CommandHeader; 5159 struct RequestBlock Request; 5160 struct ErrDescriptor ErrorDescriptor; 5161 }; 5162 struct Command *cmd; 5163 static const size_t cmd_sz = sizeof(*cmd) + 5164 sizeof(cmd->ErrorDescriptor); 5165 dma_addr_t paddr64; 5166 uint32_t paddr32, tag; 5167 void __iomem *vaddr; 5168 int i, err; 5169 5170 vaddr = pci_ioremap_bar(pdev, 0); 5171 if (vaddr == NULL) 5172 return -ENOMEM; 5173 5174 /* The Inbound Post Queue only accepts 32-bit physical addresses for the 5175 * CCISS commands, so they must be allocated from the lower 4GiB of 5176 * memory. 5177 */ 5178 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 5179 if (err) { 5180 iounmap(vaddr); 5181 return -ENOMEM; 5182 } 5183 5184 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); 5185 if (cmd == NULL) { 5186 iounmap(vaddr); 5187 return -ENOMEM; 5188 } 5189 5190 /* This must fit, because of the 32-bit consistent DMA mask. 
Also, 5191 * although there's no guarantee, we assume that the address is at 5192 * least 4-byte aligned (most likely, it's page-aligned). 5193 */ 5194 paddr32 = paddr64; 5195 5196 cmd->CommandHeader.ReplyQueue = 0; 5197 cmd->CommandHeader.SGList = 0; 5198 cmd->CommandHeader.SGTotal = 0; 5199 cmd->CommandHeader.Tag.lower = paddr32; 5200 cmd->CommandHeader.Tag.upper = 0; 5201 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); 5202 5203 cmd->Request.CDBLen = 16; 5204 cmd->Request.Type.Type = TYPE_MSG; 5205 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE; 5206 cmd->Request.Type.Direction = XFER_NONE; 5207 cmd->Request.Timeout = 0; /* Don't time out */ 5208 cmd->Request.CDB[0] = opcode; 5209 cmd->Request.CDB[1] = type; 5210 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ 5211 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd); 5212 cmd->ErrorDescriptor.Addr.upper = 0; 5213 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo); 5214 5215 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET); 5216 5217 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { 5218 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); 5219 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32) 5220 break; 5221 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); 5222 } 5223 5224 iounmap(vaddr); 5225 5226 /* we leak the DMA buffer here ... no choice since the controller could 5227 * still complete the command. 5228 */ 5229 if (i == HPSA_MSG_SEND_RETRY_LIMIT) { 5230 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n", 5231 opcode, type); 5232 return -ETIMEDOUT; 5233 } 5234 5235 pci_free_consistent(pdev, cmd_sz, cmd, paddr64); 5236 5237 if (tag & HPSA_ERROR_BIT) { 5238 dev_err(&pdev->dev, "controller message %02x:%02x failed\n", 5239 opcode, type); 5240 return -EIO; 5241 } 5242 5243 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", 5244 opcode, type); 5245 return 0; 5246 } 5247 5248 #define hpsa_noop(p) hpsa_message(p, 3, 0) 5249 5250 static int hpsa_controller_hard_reset(struct pci_dev *pdev, 5251 void * __iomem vaddr, u32 use_doorbell) 5252 { 5253 u16 pmcsr; 5254 int pos; 5255 5256 if (use_doorbell) { 5257 /* For everything after the P600, the PCI power state method 5258 * of resetting the controller doesn't work, so we have this 5259 * other way using the doorbell register. 5260 */ 5261 dev_info(&pdev->dev, "using doorbell to reset controller\n"); 5262 writel(use_doorbell, vaddr + SA5_DOORBELL); 5263 5264 /* PMC hardware guys tell us we need a 5 second delay after 5265 * doorbell reset and before any attempt to talk to the board 5266 * at all to ensure that this actually works and doesn't fall 5267 * over in some weird corner cases. 5268 */ 5269 msleep(5000); 5270 } else { /* Try to do it the PCI power state way */ 5271 5272 /* Quoting from the Open CISS Specification: "The Power 5273 * Management Control/Status Register (CSR) controls the power 5274 * state of the device. The normal operating state is D0, 5275 * CSR=00h. The software off state is D3, CSR=03h. To reset 5276 * the controller, place the interface device in D3 then to D0, 5277 * this causes a secondary PCI reset which will reset the 5278 * controller." 
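 * The code below therefore cycles the PM control/status register through
 * D3hot and back to D0 to trigger that secondary PCI reset.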
*/ 5279 5280 pos = pci_find_capability(pdev, PCI_CAP_ID_PM); 5281 if (pos == 0) { 5282 dev_err(&pdev->dev, 5283 "hpsa_reset_controller: " 5284 "PCI PM not supported\n"); 5285 return -ENODEV; 5286 } 5287 dev_info(&pdev->dev, "using PCI PM to reset controller\n"); 5288 /* enter the D3hot power management state */ 5289 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); 5290 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 5291 pmcsr |= PCI_D3hot; 5292 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 5293 5294 msleep(500); 5295 5296 /* enter the D0 power management state */ 5297 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 5298 pmcsr |= PCI_D0; 5299 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 5300 5301 /* 5302 * The P600 requires a small delay when changing states. 5303 * Otherwise we may think the board did not reset and we bail. 5304 * This for kdump only and is particular to the P600. 5305 */ 5306 msleep(500); 5307 } 5308 return 0; 5309 } 5310 5311 static void init_driver_version(char *driver_version, int len) 5312 { 5313 memset(driver_version, 0, len); 5314 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1); 5315 } 5316 5317 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable) 5318 { 5319 char *driver_version; 5320 int i, size = sizeof(cfgtable->driver_version); 5321 5322 driver_version = kmalloc(size, GFP_KERNEL); 5323 if (!driver_version) 5324 return -ENOMEM; 5325 5326 init_driver_version(driver_version, size); 5327 for (i = 0; i < size; i++) 5328 writeb(driver_version[i], &cfgtable->driver_version[i]); 5329 kfree(driver_version); 5330 return 0; 5331 } 5332 5333 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable, 5334 unsigned char *driver_ver) 5335 { 5336 int i; 5337 5338 for (i = 0; i < sizeof(cfgtable->driver_version); i++) 5339 driver_ver[i] = readb(&cfgtable->driver_version[i]); 5340 } 5341 5342 static int controller_reset_failed(struct CfgTable __iomem *cfgtable) 5343 { 5344 5345 char *driver_ver, *old_driver_ver; 5346 int rc, size = sizeof(cfgtable->driver_version); 5347 5348 old_driver_ver = kmalloc(2 * size, GFP_KERNEL); 5349 if (!old_driver_ver) 5350 return -ENOMEM; 5351 driver_ver = old_driver_ver + size; 5352 5353 /* After a reset, the 32 bytes of "driver version" in the cfgtable 5354 * should have been changed, otherwise we know the reset failed. 5355 */ 5356 init_driver_version(old_driver_ver, size); 5357 read_driver_ver_from_cfgtable(cfgtable, driver_ver); 5358 rc = !memcmp(driver_ver, old_driver_ver, size); 5359 kfree(old_driver_ver); 5360 return rc; 5361 } 5362 /* This does a hard reset of the controller using PCI power management 5363 * states or the using the doorbell register. 5364 */ 5365 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) 5366 { 5367 u64 cfg_offset; 5368 u32 cfg_base_addr; 5369 u64 cfg_base_addr_index; 5370 void __iomem *vaddr; 5371 unsigned long paddr; 5372 u32 misc_fw_support; 5373 int rc; 5374 struct CfgTable __iomem *cfgtable; 5375 u32 use_doorbell; 5376 u32 board_id; 5377 u16 command_register; 5378 5379 /* For controllers as old as the P600, this is very nearly 5380 * the same thing as 5381 * 5382 * pci_save_state(pci_dev); 5383 * pci_set_power_state(pci_dev, PCI_D3hot); 5384 * pci_set_power_state(pci_dev, PCI_D0); 5385 * pci_restore_state(pci_dev); 5386 * 5387 * For controllers newer than the P600, the pci power state 5388 * method of resetting doesn't work so we have another way 5389 * using the doorbell register. 
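 * Which method applies is decided below from the MISC_FW_DOORBELL_RESET and
 * MISC_FW_DOORBELL_RESET2 bits in the config table's misc_fw_support word.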
5390 */ 5391 5392 rc = hpsa_lookup_board_id(pdev, &board_id); 5393 if (rc < 0 || !ctlr_is_resettable(board_id)) { 5394 dev_warn(&pdev->dev, "Not resetting device.\n"); 5395 return -ENODEV; 5396 } 5397 5398 /* if controller is soft- but not hard resettable... */ 5399 if (!ctlr_is_hard_resettable(board_id)) 5400 return -ENOTSUPP; /* try soft reset later. */ 5401 5402 /* Save the PCI command register */ 5403 pci_read_config_word(pdev, 4, &command_register); 5404 /* Turn the board off. This is so that later pci_restore_state() 5405 * won't turn the board on before the rest of config space is ready. 5406 */ 5407 pci_disable_device(pdev); 5408 pci_save_state(pdev); 5409 5410 /* find the first memory BAR, so we can find the cfg table */ 5411 rc = hpsa_pci_find_memory_BAR(pdev, &paddr); 5412 if (rc) 5413 return rc; 5414 vaddr = remap_pci_mem(paddr, 0x250); 5415 if (!vaddr) 5416 return -ENOMEM; 5417 5418 /* find cfgtable in order to check if reset via doorbell is supported */ 5419 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, 5420 &cfg_base_addr_index, &cfg_offset); 5421 if (rc) 5422 goto unmap_vaddr; 5423 cfgtable = remap_pci_mem(pci_resource_start(pdev, 5424 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable)); 5425 if (!cfgtable) { 5426 rc = -ENOMEM; 5427 goto unmap_vaddr; 5428 } 5429 rc = write_driver_ver_to_cfgtable(cfgtable); 5430 if (rc) 5431 goto unmap_vaddr; 5432 5433 /* If reset via doorbell register is supported, use that. 5434 * There are two such methods. Favor the newest method. 5435 */ 5436 misc_fw_support = readl(&cfgtable->misc_fw_support); 5437 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2; 5438 if (use_doorbell) { 5439 use_doorbell = DOORBELL_CTLR_RESET2; 5440 } else { 5441 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; 5442 if (use_doorbell) { 5443 dev_warn(&pdev->dev, "Soft reset not supported. " 5444 "Firmware update is required.\n"); 5445 rc = -ENOTSUPP; /* try soft reset */ 5446 goto unmap_cfgtable; 5447 } 5448 } 5449 5450 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); 5451 if (rc) 5452 goto unmap_cfgtable; 5453 5454 pci_restore_state(pdev); 5455 rc = pci_enable_device(pdev); 5456 if (rc) { 5457 dev_warn(&pdev->dev, "failed to enable device.\n"); 5458 goto unmap_cfgtable; 5459 } 5460 pci_write_config_word(pdev, 4, command_register); 5461 5462 /* Some devices (notably the HP Smart Array 5i Controller) 5463 need a little pause here */ 5464 msleep(HPSA_POST_RESET_PAUSE_MSECS); 5465 5466 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); 5467 if (rc) { 5468 dev_warn(&pdev->dev, 5469 "failed waiting for board to become ready " 5470 "after hard reset\n"); 5471 goto unmap_cfgtable; 5472 } 5473 5474 rc = controller_reset_failed(vaddr); 5475 if (rc < 0) 5476 goto unmap_cfgtable; 5477 if (rc) { 5478 dev_warn(&pdev->dev, "Unable to successfully reset " 5479 "controller. Will try soft reset.\n"); 5480 rc = -ENOTSUPP; 5481 } else { 5482 dev_info(&pdev->dev, "board ready after hard reset.\n"); 5483 } 5484 5485 unmap_cfgtable: 5486 iounmap(cfgtable); 5487 5488 unmap_vaddr: 5489 iounmap(vaddr); 5490 return rc; 5491 } 5492 5493 /* 5494 * We cannot read the structure directly, for portability we must use 5495 * the io functions. 5496 * This is for debug only. 
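 * (Every field is therefore fetched with readb()/readl() accessors below
 * instead of being dereferenced directly, since the config table lives in
 * PCI memory-mapped space.)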
5497 */ 5498 static void print_cfg_table(struct device *dev, struct CfgTable *tb) 5499 { 5500 #ifdef HPSA_DEBUG 5501 int i; 5502 char temp_name[17]; 5503 5504 dev_info(dev, "Controller Configuration information\n"); 5505 dev_info(dev, "------------------------------------\n"); 5506 for (i = 0; i < 4; i++) 5507 temp_name[i] = readb(&(tb->Signature[i])); 5508 temp_name[4] = '\0'; 5509 dev_info(dev, " Signature = %s\n", temp_name); 5510 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence))); 5511 dev_info(dev, " Transport methods supported = 0x%x\n", 5512 readl(&(tb->TransportSupport))); 5513 dev_info(dev, " Transport methods active = 0x%x\n", 5514 readl(&(tb->TransportActive))); 5515 dev_info(dev, " Requested transport Method = 0x%x\n", 5516 readl(&(tb->HostWrite.TransportRequest))); 5517 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n", 5518 readl(&(tb->HostWrite.CoalIntDelay))); 5519 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n", 5520 readl(&(tb->HostWrite.CoalIntCount))); 5521 dev_info(dev, " Max outstanding commands = 0x%d\n", 5522 readl(&(tb->CmdsOutMax))); 5523 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); 5524 for (i = 0; i < 16; i++) 5525 temp_name[i] = readb(&(tb->ServerName[i])); 5526 temp_name[16] = '\0'; 5527 dev_info(dev, " Server Name = %s\n", temp_name); 5528 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n", 5529 readl(&(tb->HeartBeat))); 5530 #endif /* HPSA_DEBUG */ 5531 } 5532 5533 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) 5534 { 5535 int i, offset, mem_type, bar_type; 5536 5537 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */ 5538 return 0; 5539 offset = 0; 5540 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 5541 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE; 5542 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO) 5543 offset += 4; 5544 else { 5545 mem_type = pci_resource_flags(pdev, i) & 5546 PCI_BASE_ADDRESS_MEM_TYPE_MASK; 5547 switch (mem_type) { 5548 case PCI_BASE_ADDRESS_MEM_TYPE_32: 5549 case PCI_BASE_ADDRESS_MEM_TYPE_1M: 5550 offset += 4; /* 32 bit */ 5551 break; 5552 case PCI_BASE_ADDRESS_MEM_TYPE_64: 5553 offset += 8; 5554 break; 5555 default: /* reserved in PCI 2.2 */ 5556 dev_warn(&pdev->dev, 5557 "base address is invalid\n"); 5558 return -1; 5559 break; 5560 } 5561 } 5562 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) 5563 return i + 1; 5564 } 5565 return -1; 5566 } 5567 5568 /* If MSI/MSI-X is supported by the kernel we will try to enable it on 5569 * controllers that are capable. If not, we use IO-APIC mode. 
5570 */ 5571 5572 static void hpsa_interrupt_mode(struct ctlr_info *h) 5573 { 5574 #ifdef CONFIG_PCI_MSI 5575 int err, i; 5576 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES]; 5577 5578 for (i = 0; i < MAX_REPLY_QUEUES; i++) { 5579 hpsa_msix_entries[i].vector = 0; 5580 hpsa_msix_entries[i].entry = i; 5581 } 5582 5583 /* Some boards advertise MSI but don't really support it */ 5584 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || 5585 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) 5586 goto default_int_mode; 5587 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { 5588 dev_info(&h->pdev->dev, "MSIX\n"); 5589 h->msix_vector = MAX_REPLY_QUEUES; 5590 err = pci_enable_msix(h->pdev, hpsa_msix_entries, 5591 h->msix_vector); 5592 if (err > 0) { 5593 dev_warn(&h->pdev->dev, "only %d MSI-X vectors " 5594 "available\n", err); 5595 h->msix_vector = err; 5596 err = pci_enable_msix(h->pdev, hpsa_msix_entries, 5597 h->msix_vector); 5598 } 5599 if (!err) { 5600 for (i = 0; i < h->msix_vector; i++) 5601 h->intr[i] = hpsa_msix_entries[i].vector; 5602 return; 5603 } else { 5604 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", 5605 err); 5606 h->msix_vector = 0; 5607 goto default_int_mode; 5608 } 5609 } 5610 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { 5611 dev_info(&h->pdev->dev, "MSI\n"); 5612 if (!pci_enable_msi(h->pdev)) 5613 h->msi_vector = 1; 5614 else 5615 dev_warn(&h->pdev->dev, "MSI init failed\n"); 5616 } 5617 default_int_mode: 5618 #endif /* CONFIG_PCI_MSI */ 5619 /* if we get here we're going to use the default interrupt mode */ 5620 h->intr[h->intr_mode] = h->pdev->irq; 5621 } 5622 5623 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) 5624 { 5625 int i; 5626 u32 subsystem_vendor_id, subsystem_device_id; 5627 5628 subsystem_vendor_id = pdev->subsystem_vendor; 5629 subsystem_device_id = pdev->subsystem_device; 5630 *board_id = ((subsystem_device_id << 16) & 0xffff0000) | 5631 subsystem_vendor_id; 5632 5633 for (i = 0; i < ARRAY_SIZE(products); i++) 5634 if (*board_id == products[i].board_id) 5635 return i; 5636 5637 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP && 5638 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) || 5639 !hpsa_allow_any) { 5640 dev_warn(&pdev->dev, "unrecognized board ID: " 5641 "0x%08x, ignoring.\n", *board_id); 5642 return -ENODEV; 5643 } 5644 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */ 5645 } 5646 5647 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev, 5648 unsigned long *memory_bar) 5649 { 5650 int i; 5651 5652 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) 5653 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 5654 /* addressing mode bits already removed */ 5655 *memory_bar = pci_resource_start(pdev, i); 5656 dev_dbg(&pdev->dev, "memory BAR = %lx\n", 5657 *memory_bar); 5658 return 0; 5659 } 5660 dev_warn(&pdev->dev, "no memory BAR found\n"); 5661 return -ENODEV; 5662 } 5663 5664 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, 5665 int wait_for_ready) 5666 { 5667 int i, iterations; 5668 u32 scratchpad; 5669 if (wait_for_ready) 5670 iterations = HPSA_BOARD_READY_ITERATIONS; 5671 else 5672 iterations = HPSA_BOARD_NOT_READY_ITERATIONS; 5673 5674 for (i = 0; i < iterations; i++) { 5675 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); 5676 if (wait_for_ready) { 5677 if (scratchpad == HPSA_FIRMWARE_READY) 5678 return 0; 5679 } else { 5680 if (scratchpad != HPSA_FIRMWARE_READY) 5681 return 0; 5682 } 5683 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS); 5684 } 
5685 dev_warn(&pdev->dev, "board not ready, timed out.\n"); 5686 return -ENODEV; 5687 } 5688 5689 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, 5690 u32 *cfg_base_addr, u64 *cfg_base_addr_index, 5691 u64 *cfg_offset) 5692 { 5693 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET); 5694 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET); 5695 *cfg_base_addr &= (u32) 0x0000ffff; 5696 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr); 5697 if (*cfg_base_addr_index == -1) { 5698 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n"); 5699 return -ENODEV; 5700 } 5701 return 0; 5702 } 5703 5704 static int hpsa_find_cfgtables(struct ctlr_info *h) 5705 { 5706 u64 cfg_offset; 5707 u32 cfg_base_addr; 5708 u64 cfg_base_addr_index; 5709 u32 trans_offset; 5710 int rc; 5711 5712 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, 5713 &cfg_base_addr_index, &cfg_offset); 5714 if (rc) 5715 return rc; 5716 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, 5717 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); 5718 if (!h->cfgtable) 5719 return -ENOMEM; 5720 rc = write_driver_ver_to_cfgtable(h->cfgtable); 5721 if (rc) 5722 return rc; 5723 /* Find performant mode table. */ 5724 trans_offset = readl(&h->cfgtable->TransMethodOffset); 5725 h->transtable = remap_pci_mem(pci_resource_start(h->pdev, 5726 cfg_base_addr_index)+cfg_offset+trans_offset, 5727 sizeof(*h->transtable)); 5728 if (!h->transtable) 5729 return -ENOMEM; 5730 return 0; 5731 } 5732 5733 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) 5734 { 5735 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); 5736 5737 /* Limit commands in memory limited kdump scenario. */ 5738 if (reset_devices && h->max_commands > 32) 5739 h->max_commands = 32; 5740 5741 if (h->max_commands < 16) { 5742 dev_warn(&h->pdev->dev, "Controller reports " 5743 "max supported commands of %d, an obvious lie. " 5744 "Using 16. Ensure that firmware is up to date.\n", 5745 h->max_commands); 5746 h->max_commands = 16; 5747 } 5748 } 5749 5750 /* Interrogate the hardware for some limits: 5751 * max commands, max SG elements without chaining, and with chaining, 5752 * SG chain block size, etc. 5753 */ 5754 static void hpsa_find_board_params(struct ctlr_info *h) 5755 { 5756 hpsa_get_max_perf_mode_cmds(h); 5757 h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */ 5758 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); 5759 h->fw_support = readl(&(h->cfgtable->misc_fw_support)); 5760 /* 5761 * Limit in-command s/g elements to 32 save dma'able memory. 
	 * However, the spec says if 0, use 31.
	 */
	h->max_cmd_sg_entries = 31;
	if (h->maxsgentries > 512) {
		h->max_cmd_sg_entries = 32;
		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
		h->maxsgentries--; /* save one for chain pointer */
	} else {
		h->maxsgentries = 31; /* default to traditional values */
		h->chainsize = 0;
	}

	/* Find out what task management functions are supported and cache */
	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
	if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
}

static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
	if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
		return false;
	}
	return true;
}

static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
{
	u32 driver_support;

	/* Read the current value first so that non-x86 builds do not write
	 * back an uninitialized word; only the prefetch enable is
	 * x86-specific.
	 */
	driver_support = readl(&(h->cfgtable->driver_support));
#ifdef CONFIG_X86
	/* Need to enable prefetch in the SCSI core for 6400 in x86 */
	driver_support |= ENABLE_SCSI_PREFETCH;
#endif
	driver_support |= ENABLE_UNIT_ATTN;
	writel(driver_support, &(h->cfgtable->driver_support));
}

/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
	u32 dma_prefetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}

static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* wait until the clear_event_notify bit 6 is cleared by controller. */
	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
			break;
		/* delay and try again */
		msleep(20);
	}
}

static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* Under certain very rare conditions, this can take a while.
	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
	 * as we enter this code.)
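	 *
	 * The handshake: we set CFGTBL_ChangeReq in the doorbell register
	 * and the controller clears it once the new transport request has
	 * been acted on, so we just poll until the bit drops (or give up
	 * after MAX_CONFIG_WAIT tries).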
5844 */ 5845 for (i = 0; i < MAX_CONFIG_WAIT; i++) { 5846 spin_lock_irqsave(&h->lock, flags); 5847 doorbell_value = readl(h->vaddr + SA5_DOORBELL); 5848 spin_unlock_irqrestore(&h->lock, flags); 5849 if (!(doorbell_value & CFGTBL_ChangeReq)) 5850 break; 5851 /* delay and try again */ 5852 usleep_range(10000, 20000); 5853 } 5854 } 5855 5856 static int hpsa_enter_simple_mode(struct ctlr_info *h) 5857 { 5858 u32 trans_support; 5859 5860 trans_support = readl(&(h->cfgtable->TransportSupport)); 5861 if (!(trans_support & SIMPLE_MODE)) 5862 return -ENOTSUPP; 5863 5864 h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); 5865 5866 /* Update the field, and then ring the doorbell */ 5867 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); 5868 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); 5869 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 5870 hpsa_wait_for_mode_change_ack(h); 5871 print_cfg_table(&h->pdev->dev, h->cfgtable); 5872 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) 5873 goto error; 5874 h->transMethod = CFGTBL_Trans_Simple; 5875 return 0; 5876 error: 5877 dev_warn(&h->pdev->dev, "unable to get board into simple mode\n"); 5878 return -ENODEV; 5879 } 5880 5881 static int hpsa_pci_init(struct ctlr_info *h) 5882 { 5883 int prod_index, err; 5884 5885 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); 5886 if (prod_index < 0) 5887 return -ENODEV; 5888 h->product_name = products[prod_index].product_name; 5889 h->access = *(products[prod_index].access); 5890 5891 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | 5892 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); 5893 5894 err = pci_enable_device(h->pdev); 5895 if (err) { 5896 dev_warn(&h->pdev->dev, "unable to enable PCI device\n"); 5897 return err; 5898 } 5899 5900 /* Enable bus mastering (pci_disable_device may disable this) */ 5901 pci_set_master(h->pdev); 5902 5903 err = pci_request_regions(h->pdev, HPSA); 5904 if (err) { 5905 dev_err(&h->pdev->dev, 5906 "cannot obtain PCI resources, aborting\n"); 5907 return err; 5908 } 5909 hpsa_interrupt_mode(h); 5910 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); 5911 if (err) 5912 goto err_out_free_res; 5913 h->vaddr = remap_pci_mem(h->paddr, 0x250); 5914 if (!h->vaddr) { 5915 err = -ENOMEM; 5916 goto err_out_free_res; 5917 } 5918 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); 5919 if (err) 5920 goto err_out_free_res; 5921 err = hpsa_find_cfgtables(h); 5922 if (err) 5923 goto err_out_free_res; 5924 hpsa_find_board_params(h); 5925 5926 if (!hpsa_CISS_signature_present(h)) { 5927 err = -ENODEV; 5928 goto err_out_free_res; 5929 } 5930 hpsa_set_driver_support_bits(h); 5931 hpsa_p600_dma_prefetch_quirk(h); 5932 err = hpsa_enter_simple_mode(h); 5933 if (err) 5934 goto err_out_free_res; 5935 return 0; 5936 5937 err_out_free_res: 5938 if (h->transtable) 5939 iounmap(h->transtable); 5940 if (h->cfgtable) 5941 iounmap(h->cfgtable); 5942 if (h->vaddr) 5943 iounmap(h->vaddr); 5944 pci_disable_device(h->pdev); 5945 pci_release_regions(h->pdev); 5946 return err; 5947 } 5948 5949 static void hpsa_hba_inquiry(struct ctlr_info *h) 5950 { 5951 int rc; 5952 5953 #define HBA_INQUIRY_BYTE_COUNT 64 5954 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL); 5955 if (!h->hba_inquiry_data) 5956 return; 5957 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0, 5958 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT); 5959 if (rc != 0) { 5960 kfree(h->hba_inquiry_data); 5961 h->hba_inquiry_data = NULL; 5962 } 5963 } 5964 5965 static int 
hpsa_init_reset_devices(struct pci_dev *pdev) 5966 { 5967 int rc, i; 5968 5969 if (!reset_devices) 5970 return 0; 5971 5972 /* Reset the controller with a PCI power-cycle or via doorbell */ 5973 rc = hpsa_kdump_hard_reset_controller(pdev); 5974 5975 /* -ENOTSUPP here means we cannot reset the controller 5976 * but it's already (and still) up and running in 5977 * "performant mode". Or, it might be 640x, which can't reset 5978 * due to concerns about shared bbwc between 6402/6404 pair. 5979 */ 5980 if (rc == -ENOTSUPP) 5981 return rc; /* just try to do the kdump anyhow. */ 5982 if (rc) 5983 return -ENODEV; 5984 5985 /* Now try to get the controller to respond to a no-op */ 5986 dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n"); 5987 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { 5988 if (hpsa_noop(pdev) == 0) 5989 break; 5990 else 5991 dev_warn(&pdev->dev, "no-op failed%s\n", 5992 (i < 11 ? "; re-trying" : "")); 5993 } 5994 return 0; 5995 } 5996 5997 static int hpsa_allocate_cmd_pool(struct ctlr_info *h) 5998 { 5999 h->cmd_pool_bits = kzalloc( 6000 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) * 6001 sizeof(unsigned long), GFP_KERNEL); 6002 h->cmd_pool = pci_alloc_consistent(h->pdev, 6003 h->nr_cmds * sizeof(*h->cmd_pool), 6004 &(h->cmd_pool_dhandle)); 6005 h->errinfo_pool = pci_alloc_consistent(h->pdev, 6006 h->nr_cmds * sizeof(*h->errinfo_pool), 6007 &(h->errinfo_pool_dhandle)); 6008 if ((h->cmd_pool_bits == NULL) 6009 || (h->cmd_pool == NULL) 6010 || (h->errinfo_pool == NULL)) { 6011 dev_err(&h->pdev->dev, "out of memory in %s", __func__); 6012 return -ENOMEM; 6013 } 6014 return 0; 6015 } 6016 6017 static void hpsa_free_cmd_pool(struct ctlr_info *h) 6018 { 6019 kfree(h->cmd_pool_bits); 6020 if (h->cmd_pool) 6021 pci_free_consistent(h->pdev, 6022 h->nr_cmds * sizeof(struct CommandList), 6023 h->cmd_pool, h->cmd_pool_dhandle); 6024 if (h->ioaccel2_cmd_pool) 6025 pci_free_consistent(h->pdev, 6026 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), 6027 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle); 6028 if (h->errinfo_pool) 6029 pci_free_consistent(h->pdev, 6030 h->nr_cmds * sizeof(struct ErrorInfo), 6031 h->errinfo_pool, 6032 h->errinfo_pool_dhandle); 6033 if (h->ioaccel_cmd_pool) 6034 pci_free_consistent(h->pdev, 6035 h->nr_cmds * sizeof(struct io_accel1_cmd), 6036 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); 6037 } 6038 6039 static int hpsa_request_irq(struct ctlr_info *h, 6040 irqreturn_t (*msixhandler)(int, void *), 6041 irqreturn_t (*intxhandler)(int, void *)) 6042 { 6043 int rc, i; 6044 6045 /* 6046 * initialize h->q[x] = x so that interrupt handlers know which 6047 * queue to process. 
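	 * (Each handler receives &h->q[i] as its dev_id, and that single
	 * byte tells it which reply queue to service.)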
6048 */ 6049 for (i = 0; i < MAX_REPLY_QUEUES; i++) 6050 h->q[i] = (u8) i; 6051 6052 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) { 6053 /* If performant mode and MSI-X, use multiple reply queues */ 6054 for (i = 0; i < h->msix_vector; i++) 6055 rc = request_irq(h->intr[i], msixhandler, 6056 0, h->devname, 6057 &h->q[i]); 6058 } else { 6059 /* Use single reply pool */ 6060 if (h->msix_vector > 0 || h->msi_vector) { 6061 rc = request_irq(h->intr[h->intr_mode], 6062 msixhandler, 0, h->devname, 6063 &h->q[h->intr_mode]); 6064 } else { 6065 rc = request_irq(h->intr[h->intr_mode], 6066 intxhandler, IRQF_SHARED, h->devname, 6067 &h->q[h->intr_mode]); 6068 } 6069 } 6070 if (rc) { 6071 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n", 6072 h->intr[h->intr_mode], h->devname); 6073 return -ENODEV; 6074 } 6075 return 0; 6076 } 6077 6078 static int hpsa_kdump_soft_reset(struct ctlr_info *h) 6079 { 6080 if (hpsa_send_host_reset(h, RAID_CTLR_LUNID, 6081 HPSA_RESET_TYPE_CONTROLLER)) { 6082 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n"); 6083 return -EIO; 6084 } 6085 6086 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); 6087 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) { 6088 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); 6089 return -1; 6090 } 6091 6092 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); 6093 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) { 6094 dev_warn(&h->pdev->dev, "Board failed to become ready " 6095 "after soft reset.\n"); 6096 return -1; 6097 } 6098 6099 return 0; 6100 } 6101 6102 static void free_irqs(struct ctlr_info *h) 6103 { 6104 int i; 6105 6106 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) { 6107 /* Single reply queue, only one irq to free */ 6108 i = h->intr_mode; 6109 free_irq(h->intr[i], &h->q[i]); 6110 return; 6111 } 6112 6113 for (i = 0; i < h->msix_vector; i++) 6114 free_irq(h->intr[i], &h->q[i]); 6115 } 6116 6117 static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h) 6118 { 6119 free_irqs(h); 6120 #ifdef CONFIG_PCI_MSI 6121 if (h->msix_vector) { 6122 if (h->pdev->msix_enabled) 6123 pci_disable_msix(h->pdev); 6124 } else if (h->msi_vector) { 6125 if (h->pdev->msi_enabled) 6126 pci_disable_msi(h->pdev); 6127 } 6128 #endif /* CONFIG_PCI_MSI */ 6129 } 6130 6131 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) 6132 { 6133 hpsa_free_irqs_and_disable_msix(h); 6134 hpsa_free_sg_chain_blocks(h); 6135 hpsa_free_cmd_pool(h); 6136 kfree(h->ioaccel1_blockFetchTable); 6137 kfree(h->blockFetchTable); 6138 pci_free_consistent(h->pdev, h->reply_pool_size, 6139 h->reply_pool, h->reply_pool_dhandle); 6140 if (h->vaddr) 6141 iounmap(h->vaddr); 6142 if (h->transtable) 6143 iounmap(h->transtable); 6144 if (h->cfgtable) 6145 iounmap(h->cfgtable); 6146 pci_release_regions(h->pdev); 6147 kfree(h); 6148 } 6149 6150 /* Called when controller lockup detected. */ 6151 static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list) 6152 { 6153 struct CommandList *c = NULL; 6154 6155 assert_spin_locked(&h->lock); 6156 /* Mark all outstanding commands as failed and complete them. 
*/ 6157 while (!list_empty(list)) { 6158 c = list_entry(list->next, struct CommandList, list); 6159 c->err_info->CommandStatus = CMD_HARDWARE_ERR; 6160 finish_cmd(c); 6161 } 6162 } 6163 6164 static void controller_lockup_detected(struct ctlr_info *h) 6165 { 6166 unsigned long flags; 6167 6168 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6169 spin_lock_irqsave(&h->lock, flags); 6170 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); 6171 spin_unlock_irqrestore(&h->lock, flags); 6172 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n", 6173 h->lockup_detected); 6174 pci_disable_device(h->pdev); 6175 spin_lock_irqsave(&h->lock, flags); 6176 fail_all_cmds_on_list(h, &h->cmpQ); 6177 fail_all_cmds_on_list(h, &h->reqQ); 6178 spin_unlock_irqrestore(&h->lock, flags); 6179 } 6180 6181 static void detect_controller_lockup(struct ctlr_info *h) 6182 { 6183 u64 now; 6184 u32 heartbeat; 6185 unsigned long flags; 6186 6187 now = get_jiffies_64(); 6188 /* If we've received an interrupt recently, we're ok. */ 6189 if (time_after64(h->last_intr_timestamp + 6190 (h->heartbeat_sample_interval), now)) 6191 return; 6192 6193 /* 6194 * If we've already checked the heartbeat recently, we're ok. 6195 * This could happen if someone sends us a signal. We 6196 * otherwise don't care about signals in this thread. 6197 */ 6198 if (time_after64(h->last_heartbeat_timestamp + 6199 (h->heartbeat_sample_interval), now)) 6200 return; 6201 6202 /* If heartbeat has not changed since we last looked, we're not ok. */ 6203 spin_lock_irqsave(&h->lock, flags); 6204 heartbeat = readl(&h->cfgtable->HeartBeat); 6205 spin_unlock_irqrestore(&h->lock, flags); 6206 if (h->last_heartbeat == heartbeat) { 6207 controller_lockup_detected(h); 6208 return; 6209 } 6210 6211 /* We're ok. */ 6212 h->last_heartbeat = heartbeat; 6213 h->last_heartbeat_timestamp = now; 6214 } 6215 6216 static int hpsa_kickoff_rescan(struct ctlr_info *h) 6217 { 6218 int i; 6219 char *event_type; 6220 6221 /* Clear the driver-requested rescan flag */ 6222 h->drv_req_rescan = 0; 6223 6224 /* Ask the controller to clear the events we're handling. */ 6225 if ((h->transMethod & (CFGTBL_Trans_io_accel1 6226 | CFGTBL_Trans_io_accel2)) && 6227 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE || 6228 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) { 6229 6230 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE) 6231 event_type = "state change"; 6232 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE) 6233 event_type = "configuration change"; 6234 /* Stop sending new RAID offload reqs via the IO accelerator */ 6235 scsi_block_requests(h->scsi_host); 6236 for (i = 0; i < h->ndevices; i++) 6237 h->dev[i]->offload_enabled = 0; 6238 hpsa_drain_commands(h); 6239 /* Set 'accelerator path config change' bit */ 6240 dev_warn(&h->pdev->dev, 6241 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n", 6242 h->events, event_type); 6243 writel(h->events, &(h->cfgtable->clear_event_notify)); 6244 /* Set the "clear event notify field update" bit 6 */ 6245 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); 6246 /* Wait until ctlr clears 'clear event notify field', bit 6 */ 6247 hpsa_wait_for_clear_event_notify_ack(h); 6248 scsi_unblock_requests(h->scsi_host); 6249 } else { 6250 /* Acknowledge controller notification events. 
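		 * (Write the event bits back to clear_event_notify, ring
		 * doorbell bit 6, and wait for the controller to clear that
		 * bit again as its acknowledgement.)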
*/ 6251 writel(h->events, &(h->cfgtable->clear_event_notify)); 6252 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); 6253 hpsa_wait_for_clear_event_notify_ack(h); 6254 #if 0 6255 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 6256 hpsa_wait_for_mode_change_ack(h); 6257 #endif 6258 } 6259 6260 /* Something in the device list may have changed to trigger 6261 * the event, so do a rescan. 6262 */ 6263 hpsa_scan_start(h->scsi_host); 6264 /* release reference taken on scsi host in check_controller_events */ 6265 scsi_host_put(h->scsi_host); 6266 return 0; 6267 } 6268 6269 /* Check a register on the controller to see if there are configuration 6270 * changes (added/changed/removed logical drives, etc.) which mean that 6271 * we should rescan the controller for devices. 6272 * Also check flag for driver-initiated rescan. 6273 * If either flag or controller event indicate rescan, add the controller 6274 * to the list of controllers needing to be rescanned, and gets a 6275 * reference to the associated scsi_host. 6276 */ 6277 static void hpsa_ctlr_needs_rescan(struct ctlr_info *h) 6278 { 6279 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) 6280 return; 6281 6282 h->events = readl(&(h->cfgtable->event_notify)); 6283 if (!h->events && !h->drv_req_rescan) 6284 return; 6285 6286 /* 6287 * Take a reference on scsi host for the duration of the scan 6288 * Release in hpsa_kickoff_rescan(). No lock needed for scan_list 6289 * as only a single thread accesses this list. 6290 */ 6291 scsi_host_get(h->scsi_host); 6292 hpsa_kickoff_rescan(h); 6293 } 6294 6295 static void hpsa_monitor_ctlr_worker(struct work_struct *work) 6296 { 6297 unsigned long flags; 6298 struct ctlr_info *h = container_of(to_delayed_work(work), 6299 struct ctlr_info, monitor_ctlr_work); 6300 detect_controller_lockup(h); 6301 if (h->lockup_detected) 6302 return; 6303 hpsa_ctlr_needs_rescan(h); 6304 spin_lock_irqsave(&h->lock, flags); 6305 if (h->remove_in_progress) { 6306 spin_unlock_irqrestore(&h->lock, flags); 6307 return; 6308 } 6309 schedule_delayed_work(&h->monitor_ctlr_work, 6310 h->heartbeat_sample_interval); 6311 spin_unlock_irqrestore(&h->lock, flags); 6312 } 6313 6314 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 6315 { 6316 int dac, rc; 6317 struct ctlr_info *h; 6318 int try_soft_reset = 0; 6319 unsigned long flags; 6320 6321 if (number_of_controllers == 0) 6322 printk(KERN_INFO DRIVER_NAME "\n"); 6323 6324 rc = hpsa_init_reset_devices(pdev); 6325 if (rc) { 6326 if (rc != -ENOTSUPP) 6327 return rc; 6328 /* If the reset fails in a particular way (it has no way to do 6329 * a proper hard reset, so returns -ENOTSUPP) we can try to do 6330 * a soft reset once we get the controller configured up to the 6331 * point that it can accept a command. 6332 */ 6333 try_soft_reset = 1; 6334 rc = 0; 6335 } 6336 6337 reinit_after_soft_reset: 6338 6339 /* Command structures must be aligned on a 32-byte boundary because 6340 * the 5 lower bits of the address are used by the hardware. and by 6341 * the driver. See comments in hpsa.h for more info. 6342 */ 6343 #define COMMANDLIST_ALIGNMENT 128 6344 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); 6345 h = kzalloc(sizeof(*h), GFP_KERNEL); 6346 if (!h) 6347 return -ENOMEM; 6348 6349 h->pdev = pdev; 6350 h->intr_mode = hpsa_simple_mode ? 
SIMPLE_MODE_INT : PERF_MODE_INT; 6351 INIT_LIST_HEAD(&h->cmpQ); 6352 INIT_LIST_HEAD(&h->reqQ); 6353 spin_lock_init(&h->lock); 6354 spin_lock_init(&h->scan_lock); 6355 spin_lock_init(&h->passthru_count_lock); 6356 rc = hpsa_pci_init(h); 6357 if (rc != 0) 6358 goto clean1; 6359 6360 sprintf(h->devname, HPSA "%d", number_of_controllers); 6361 h->ctlr = number_of_controllers; 6362 number_of_controllers++; 6363 6364 /* configure PCI DMA stuff */ 6365 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 6366 if (rc == 0) { 6367 dac = 1; 6368 } else { 6369 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 6370 if (rc == 0) { 6371 dac = 0; 6372 } else { 6373 dev_err(&pdev->dev, "no suitable DMA available\n"); 6374 goto clean1; 6375 } 6376 } 6377 6378 /* make sure the board interrupts are off */ 6379 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6380 6381 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx)) 6382 goto clean2; 6383 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", 6384 h->devname, pdev->device, 6385 h->intr[h->intr_mode], dac ? "" : " not"); 6386 if (hpsa_allocate_cmd_pool(h)) 6387 goto clean4; 6388 if (hpsa_allocate_sg_chain_blocks(h)) 6389 goto clean4; 6390 init_waitqueue_head(&h->scan_wait_queue); 6391 h->scan_finished = 1; /* no scan currently in progress */ 6392 6393 pci_set_drvdata(pdev, h); 6394 h->ndevices = 0; 6395 h->scsi_host = NULL; 6396 spin_lock_init(&h->devlock); 6397 hpsa_put_ctlr_into_performant_mode(h); 6398 6399 /* At this point, the controller is ready to take commands. 6400 * Now, if reset_devices and the hard reset didn't work, try 6401 * the soft reset and see if that works. 6402 */ 6403 if (try_soft_reset) { 6404 6405 /* This is kind of gross. We may or may not get a completion 6406 * from the soft reset command, and if we do, then the value 6407 * from the fifo may or may not be valid. So, we wait 10 secs 6408 * after the reset throwing away any completions we get during 6409 * that time. Unregister the interrupt handler and register 6410 * fake ones to scoop up any residual completions. 6411 */ 6412 spin_lock_irqsave(&h->lock, flags); 6413 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6414 spin_unlock_irqrestore(&h->lock, flags); 6415 free_irqs(h); 6416 rc = hpsa_request_irq(h, hpsa_msix_discard_completions, 6417 hpsa_intx_discard_completions); 6418 if (rc) { 6419 dev_warn(&h->pdev->dev, "Failed to request_irq after " 6420 "soft reset.\n"); 6421 goto clean4; 6422 } 6423 6424 rc = hpsa_kdump_soft_reset(h); 6425 if (rc) 6426 /* Neither hard nor soft reset worked, we're hosed. */ 6427 goto clean4; 6428 6429 dev_info(&h->pdev->dev, "Board READY.\n"); 6430 dev_info(&h->pdev->dev, 6431 "Waiting for stale completions to drain.\n"); 6432 h->access.set_intr_mask(h, HPSA_INTR_ON); 6433 msleep(10000); 6434 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6435 6436 rc = controller_reset_failed(h->cfgtable); 6437 if (rc) 6438 dev_info(&h->pdev->dev, 6439 "Soft reset appears to have failed.\n"); 6440 6441 /* since the controller's reset, we have to go back and re-init 6442 * everything. Easiest to just forget what we've done and do it 6443 * all over again. 
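		 * (hpsa_undo_allocations_after_kdump_soft_reset() below tears
		 * down the irqs, command pools and MMIO mappings, and the
		 * goto re-enters the probe path at reinit_after_soft_reset.)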
6444 */ 6445 hpsa_undo_allocations_after_kdump_soft_reset(h); 6446 try_soft_reset = 0; 6447 if (rc) 6448 /* don't go to clean4, we already unallocated */ 6449 return -ENODEV; 6450 6451 goto reinit_after_soft_reset; 6452 } 6453 6454 /* Enable Accelerated IO path at driver layer */ 6455 h->acciopath_status = 1; 6456 6457 h->drv_req_rescan = 0; 6458 6459 /* Turn the interrupts on so we can service requests */ 6460 h->access.set_intr_mask(h, HPSA_INTR_ON); 6461 6462 hpsa_hba_inquiry(h); 6463 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ 6464 6465 /* Monitor the controller for firmware lockups */ 6466 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; 6467 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); 6468 schedule_delayed_work(&h->monitor_ctlr_work, 6469 h->heartbeat_sample_interval); 6470 return 0; 6471 6472 clean4: 6473 hpsa_free_sg_chain_blocks(h); 6474 hpsa_free_cmd_pool(h); 6475 free_irqs(h); 6476 clean2: 6477 clean1: 6478 kfree(h); 6479 return rc; 6480 } 6481 6482 static void hpsa_flush_cache(struct ctlr_info *h) 6483 { 6484 char *flush_buf; 6485 struct CommandList *c; 6486 unsigned long flags; 6487 6488 /* Don't bother trying to flush the cache if locked up */ 6489 spin_lock_irqsave(&h->lock, flags); 6490 if (unlikely(h->lockup_detected)) { 6491 spin_unlock_irqrestore(&h->lock, flags); 6492 return; 6493 } 6494 spin_unlock_irqrestore(&h->lock, flags); 6495 6496 flush_buf = kzalloc(4, GFP_KERNEL); 6497 if (!flush_buf) 6498 return; 6499 6500 c = cmd_special_alloc(h); 6501 if (!c) { 6502 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 6503 goto out_of_memory; 6504 } 6505 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, 6506 RAID_CTLR_LUNID, TYPE_CMD)) { 6507 goto out; 6508 } 6509 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE); 6510 if (c->err_info->CommandStatus != 0) 6511 out: 6512 dev_warn(&h->pdev->dev, 6513 "error flushing cache on controller\n"); 6514 cmd_special_free(h, c); 6515 out_of_memory: 6516 kfree(flush_buf); 6517 } 6518 6519 static void hpsa_shutdown(struct pci_dev *pdev) 6520 { 6521 struct ctlr_info *h; 6522 6523 h = pci_get_drvdata(pdev); 6524 /* Turn board interrupts off and send the flush cache command 6525 * sendcmd will turn off interrupt, and send the flush... 
6526 * To write all data in the battery backed cache to disks 6527 */ 6528 hpsa_flush_cache(h); 6529 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6530 hpsa_free_irqs_and_disable_msix(h); 6531 } 6532 6533 static void hpsa_free_device_info(struct ctlr_info *h) 6534 { 6535 int i; 6536 6537 for (i = 0; i < h->ndevices; i++) 6538 kfree(h->dev[i]); 6539 } 6540 6541 static void hpsa_remove_one(struct pci_dev *pdev) 6542 { 6543 struct ctlr_info *h; 6544 unsigned long flags; 6545 6546 if (pci_get_drvdata(pdev) == NULL) { 6547 dev_err(&pdev->dev, "unable to remove device\n"); 6548 return; 6549 } 6550 h = pci_get_drvdata(pdev); 6551 6552 /* Get rid of any controller monitoring work items */ 6553 spin_lock_irqsave(&h->lock, flags); 6554 h->remove_in_progress = 1; 6555 cancel_delayed_work(&h->monitor_ctlr_work); 6556 spin_unlock_irqrestore(&h->lock, flags); 6557 6558 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ 6559 hpsa_shutdown(pdev); 6560 iounmap(h->vaddr); 6561 iounmap(h->transtable); 6562 iounmap(h->cfgtable); 6563 hpsa_free_device_info(h); 6564 hpsa_free_sg_chain_blocks(h); 6565 pci_free_consistent(h->pdev, 6566 h->nr_cmds * sizeof(struct CommandList), 6567 h->cmd_pool, h->cmd_pool_dhandle); 6568 pci_free_consistent(h->pdev, 6569 h->nr_cmds * sizeof(struct ErrorInfo), 6570 h->errinfo_pool, h->errinfo_pool_dhandle); 6571 pci_free_consistent(h->pdev, h->reply_pool_size, 6572 h->reply_pool, h->reply_pool_dhandle); 6573 kfree(h->cmd_pool_bits); 6574 kfree(h->blockFetchTable); 6575 kfree(h->ioaccel1_blockFetchTable); 6576 kfree(h->ioaccel2_blockFetchTable); 6577 kfree(h->hba_inquiry_data); 6578 pci_disable_device(pdev); 6579 pci_release_regions(pdev); 6580 kfree(h); 6581 } 6582 6583 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev, 6584 __attribute__((unused)) pm_message_t state) 6585 { 6586 return -ENOSYS; 6587 } 6588 6589 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev) 6590 { 6591 return -ENOSYS; 6592 } 6593 6594 static struct pci_driver hpsa_pci_driver = { 6595 .name = HPSA, 6596 .probe = hpsa_init_one, 6597 .remove = hpsa_remove_one, 6598 .id_table = hpsa_pci_device_id, /* id_table */ 6599 .shutdown = hpsa_shutdown, 6600 .suspend = hpsa_suspend, 6601 .resume = hpsa_resume, 6602 }; 6603 6604 /* Fill in bucket_map[], given nsgs (the max number of 6605 * scatter gather elements supported) and bucket[], 6606 * which is an array of 8 integers. The bucket[] array 6607 * contains 8 different DMA transfer sizes (in 16 6608 * byte increments) which the controller uses to fetch 6609 * commands. This function fills in bucket_map[], which 6610 * maps a given number of scatter gather elements to one of 6611 * the 8 DMA transfer sizes. The point of it is to allow the 6612 * controller to only do as much DMA as needed to fetch the 6613 * command, with the DMA transfer size encoded in the lower 6614 * bits of the command address. 6615 */ 6616 static void calc_bucket_map(int bucket[], int num_buckets, 6617 int nsgs, int min_blocks, int *bucket_map) 6618 { 6619 int i, j, b, size; 6620 6621 /* Note, bucket_map must have nsgs+1 entries. */ 6622 for (i = 0; i <= nsgs; i++) { 6623 /* Compute size of a command with i SG entries */ 6624 size = i + min_blocks; 6625 b = num_buckets; /* Assume the biggest bucket */ 6626 /* Find the bucket that is just big enough */ 6627 for (j = 0; j < num_buckets; j++) { 6628 if (bucket[j] >= size) { 6629 b = j; 6630 break; 6631 } 6632 } 6633 /* for a command with i SG entries, use bucket b. 
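		 * Worked example with the bft[] table and min_blocks = 4 used
		 * by hpsa_enter_performant_mode(): a command with 3 SG
		 * entries needs 3 + 4 = 7 blocks, the smallest bucket >= 7
		 * is 8 (index 2), so bucket_map[3] = 2.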
*/ 6634 bucket_map[i] = b; 6635 } 6636 } 6637 6638 static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) 6639 { 6640 int i; 6641 unsigned long register_value; 6642 unsigned long transMethod = CFGTBL_Trans_Performant | 6643 (trans_support & CFGTBL_Trans_use_short_tags) | 6644 CFGTBL_Trans_enable_directed_msix | 6645 (trans_support & (CFGTBL_Trans_io_accel1 | 6646 CFGTBL_Trans_io_accel2)); 6647 struct access_method access = SA5_performant_access; 6648 6649 /* This is a bit complicated. There are 8 registers on 6650 * the controller which we write to to tell it 8 different 6651 * sizes of commands which there may be. It's a way of 6652 * reducing the DMA done to fetch each command. Encoded into 6653 * each command's tag are 3 bits which communicate to the controller 6654 * which of the eight sizes that command fits within. The size of 6655 * each command depends on how many scatter gather entries there are. 6656 * Each SG entry requires 16 bytes. The eight registers are programmed 6657 * with the number of 16-byte blocks a command of that size requires. 6658 * The smallest command possible requires 5 such 16 byte blocks. 6659 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte 6660 * blocks. Note, this only extends to the SG entries contained 6661 * within the command block, and does not extend to chained blocks 6662 * of SG elements. bft[] contains the eight values we write to 6663 * the registers. They are not evenly distributed, but have more 6664 * sizes for small commands, and fewer sizes for larger commands. 6665 */ 6666 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4}; 6667 #define MIN_IOACCEL2_BFT_ENTRY 5 6668 #define HPSA_IOACCEL2_HEADER_SZ 4 6669 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12, 6670 13, 14, 15, 16, 17, 18, 19, 6671 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES}; 6672 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16); 6673 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8); 6674 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) > 6675 16 * MIN_IOACCEL2_BFT_ENTRY); 6676 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16); 6677 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4); 6678 /* 5 = 1 s/g entry or 4k 6679 * 6 = 2 s/g entry or 8k 6680 * 8 = 4 s/g entry or 16k 6681 * 10 = 6 s/g entry or 24k 6682 */ 6683 6684 /* Controller spec: zero out this buffer. 
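	 * (The reply pool is then carved into h->nreply_queues contiguous
	 * rings of h->max_commands 8-byte entries each; the per-queue DMA
	 * base addresses are programmed into RepQAddr[] just below.)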
*/ 6685 memset(h->reply_pool, 0, h->reply_pool_size); 6686 6687 bft[7] = SG_ENTRIES_IN_CMD + 4; 6688 calc_bucket_map(bft, ARRAY_SIZE(bft), 6689 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable); 6690 for (i = 0; i < 8; i++) 6691 writel(bft[i], &h->transtable->BlockFetch[i]); 6692 6693 /* size of controller ring buffer */ 6694 writel(h->max_commands, &h->transtable->RepQSize); 6695 writel(h->nreply_queues, &h->transtable->RepQCount); 6696 writel(0, &h->transtable->RepQCtrAddrLow32); 6697 writel(0, &h->transtable->RepQCtrAddrHigh32); 6698 6699 for (i = 0; i < h->nreply_queues; i++) { 6700 writel(0, &h->transtable->RepQAddr[i].upper); 6701 writel(h->reply_pool_dhandle + 6702 (h->max_commands * sizeof(u64) * i), 6703 &h->transtable->RepQAddr[i].lower); 6704 } 6705 6706 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); 6707 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest)); 6708 /* 6709 * enable outbound interrupt coalescing in accelerator mode; 6710 */ 6711 if (trans_support & CFGTBL_Trans_io_accel1) { 6712 access = SA5_ioaccel_mode1_access; 6713 writel(10, &h->cfgtable->HostWrite.CoalIntDelay); 6714 writel(4, &h->cfgtable->HostWrite.CoalIntCount); 6715 } else { 6716 if (trans_support & CFGTBL_Trans_io_accel2) { 6717 access = SA5_ioaccel_mode2_access; 6718 writel(10, &h->cfgtable->HostWrite.CoalIntDelay); 6719 writel(4, &h->cfgtable->HostWrite.CoalIntCount); 6720 } 6721 } 6722 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 6723 hpsa_wait_for_mode_change_ack(h); 6724 register_value = readl(&(h->cfgtable->TransportActive)); 6725 if (!(register_value & CFGTBL_Trans_Performant)) { 6726 dev_warn(&h->pdev->dev, "unable to get board into" 6727 " performant mode\n"); 6728 return; 6729 } 6730 /* Change the access methods to the performant access methods */ 6731 h->access = access; 6732 h->transMethod = transMethod; 6733 6734 if (!((trans_support & CFGTBL_Trans_io_accel1) || 6735 (trans_support & CFGTBL_Trans_io_accel2))) 6736 return; 6737 6738 if (trans_support & CFGTBL_Trans_io_accel1) { 6739 /* Set up I/O accelerator mode */ 6740 for (i = 0; i < h->nreply_queues; i++) { 6741 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX); 6742 h->reply_queue[i].current_entry = 6743 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX); 6744 } 6745 bft[7] = h->ioaccel_maxsg + 8; 6746 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8, 6747 h->ioaccel1_blockFetchTable); 6748 6749 /* initialize all reply queue entries to unused */ 6750 memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED, 6751 h->reply_pool_size); 6752 6753 /* set all the constant fields in the accelerator command 6754 * frames once at init time to save CPU cycles later. 
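		 * (Each io_accel1 frame gets its error-info bus address,
		 * CISS-format host context flags, and a tag encoding its
		 * index with DIRECT_LOOKUP_BIT set, so a completion maps
		 * straight back to its command without searching.)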
6755 */ 6756 for (i = 0; i < h->nr_cmds; i++) { 6757 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i]; 6758 6759 cp->function = IOACCEL1_FUNCTION_SCSIIO; 6760 cp->err_info = (u32) (h->errinfo_pool_dhandle + 6761 (i * sizeof(struct ErrorInfo))); 6762 cp->err_info_len = sizeof(struct ErrorInfo); 6763 cp->sgl_offset = IOACCEL1_SGLOFFSET; 6764 cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT; 6765 cp->timeout_sec = 0; 6766 cp->ReplyQueue = 0; 6767 cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) | 6768 DIRECT_LOOKUP_BIT; 6769 cp->Tag.upper = 0; 6770 cp->host_addr.lower = 6771 (u32) (h->ioaccel_cmd_pool_dhandle + 6772 (i * sizeof(struct io_accel1_cmd))); 6773 cp->host_addr.upper = 0; 6774 } 6775 } else if (trans_support & CFGTBL_Trans_io_accel2) { 6776 u64 cfg_offset, cfg_base_addr_index; 6777 u32 bft2_offset, cfg_base_addr; 6778 int rc; 6779 6780 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, 6781 &cfg_base_addr_index, &cfg_offset); 6782 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64); 6783 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ; 6784 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg, 6785 4, h->ioaccel2_blockFetchTable); 6786 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset); 6787 BUILD_BUG_ON(offsetof(struct CfgTable, 6788 io_accel_request_size_offset) != 0xb8); 6789 h->ioaccel2_bft2_regs = 6790 remap_pci_mem(pci_resource_start(h->pdev, 6791 cfg_base_addr_index) + 6792 cfg_offset + bft2_offset, 6793 ARRAY_SIZE(bft2) * 6794 sizeof(*h->ioaccel2_bft2_regs)); 6795 for (i = 0; i < ARRAY_SIZE(bft2); i++) 6796 writel(bft2[i], &h->ioaccel2_bft2_regs[i]); 6797 } 6798 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 6799 hpsa_wait_for_mode_change_ack(h); 6800 } 6801 6802 static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h) 6803 { 6804 h->ioaccel_maxsg = 6805 readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); 6806 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES) 6807 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES; 6808 6809 /* Command structures must be aligned on a 128-byte boundary 6810 * because the 7 lower bits of the address are used by the 6811 * hardware. 
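	 * (2^7 = 128; the BUILD_BUG_ON below enforces that
	 * sizeof(struct io_accel1_cmd) is a multiple of 128, so commands
	 * packed back to back in the DMA pool all stay aligned.)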
	 */
#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
	h->ioaccel_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			&(h->ioaccel_cmd_pool_dhandle));

	h->ioaccel1_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel_cmd_pool == NULL) ||
		(h->ioaccel1_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
	return 0;

clean_up:
	if (h->ioaccel_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
	kfree(h->ioaccel1_blockFetchTable);
	return 1;
}

static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
{
	/* Allocate ioaccel2 mode command blocks and block fetch table */

	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

#define IOACCEL2_COMMANDLIST_ALIGNMENT 128
	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&(h->ioaccel2_cmd_pool_dhandle));

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
		(h->ioaccel2_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	if (h->ioaccel2_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
	kfree(h->ioaccel2_blockFetchTable);
	return 1;
}

static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
					CFGTBL_Trans_use_short_tags;
	int i;

	if (hpsa_simple_mode)
		return;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_enable_directed_msix;
		if (hpsa_alloc_ioaccel_cmd_and_bft(h))
			goto clean_up;
	} else {
		if (trans_support & CFGTBL_Trans_io_accel2) {
			transMethod |= CFGTBL_Trans_io_accel2 |
				CFGTBL_Trans_enable_directed_msix;
			if (ioaccel2_alloc_cmds_and_bft(h))
				goto clean_up;
		}
	}

	/* TODO, check that this next line h->nreply_queues is correct */
	h->nreply_queues = h->msix_vector > 0 ?
h->msix_vector : 1; 6911 hpsa_get_max_perf_mode_cmds(h); 6912 /* Performant mode ring buffer and supporting data structures */ 6913 h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues; 6914 h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size, 6915 &(h->reply_pool_dhandle)); 6916 6917 for (i = 0; i < h->nreply_queues; i++) { 6918 h->reply_queue[i].head = &h->reply_pool[h->max_commands * i]; 6919 h->reply_queue[i].size = h->max_commands; 6920 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */ 6921 h->reply_queue[i].current_entry = 0; 6922 } 6923 6924 /* Need a block fetch table for performant mode */ 6925 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) * 6926 sizeof(u32)), GFP_KERNEL); 6927 6928 if ((h->reply_pool == NULL) 6929 || (h->blockFetchTable == NULL)) 6930 goto clean_up; 6931 6932 hpsa_enter_performant_mode(h, trans_support); 6933 return; 6934 6935 clean_up: 6936 if (h->reply_pool) 6937 pci_free_consistent(h->pdev, h->reply_pool_size, 6938 h->reply_pool, h->reply_pool_dhandle); 6939 kfree(h->blockFetchTable); 6940 } 6941 6942 static void hpsa_drain_commands(struct ctlr_info *h) 6943 { 6944 int cmds_out; 6945 unsigned long flags; 6946 6947 do { /* wait for all outstanding commands to drain out */ 6948 spin_lock_irqsave(&h->lock, flags); 6949 cmds_out = h->commands_outstanding; 6950 spin_unlock_irqrestore(&h->lock, flags); 6951 if (cmds_out <= 0) 6952 break; 6953 msleep(100); 6954 } while (1); 6955 } 6956 6957 /* 6958 * This is it. Register the PCI driver information for the cards we control 6959 * the OS will call our registered routines when it finds one of our cards. 6960 */ 6961 static int __init hpsa_init(void) 6962 { 6963 return pci_register_driver(&hpsa_pci_driver); 6964 } 6965 6966 static void __exit hpsa_cleanup(void) 6967 { 6968 pci_unregister_driver(&hpsa_pci_driver); 6969 } 6970 6971 static void __attribute__((unused)) verify_offsets(void) 6972 { 6973 #define VERIFY_OFFSET(member, offset) \ 6974 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset) 6975 6976 VERIFY_OFFSET(IU_type, 0); 6977 VERIFY_OFFSET(direction, 1); 6978 VERIFY_OFFSET(reply_queue, 2); 6979 /* VERIFY_OFFSET(reserved1, 3); */ 6980 VERIFY_OFFSET(scsi_nexus, 4); 6981 VERIFY_OFFSET(Tag, 8); 6982 VERIFY_OFFSET(cdb, 16); 6983 VERIFY_OFFSET(cciss_lun, 32); 6984 VERIFY_OFFSET(data_len, 40); 6985 VERIFY_OFFSET(cmd_priority_task_attr, 44); 6986 VERIFY_OFFSET(sg_count, 45); 6987 /* VERIFY_OFFSET(reserved3 */ 6988 VERIFY_OFFSET(err_ptr, 48); 6989 VERIFY_OFFSET(err_len, 56); 6990 /* VERIFY_OFFSET(reserved4 */ 6991 VERIFY_OFFSET(sg, 64); 6992 6993 #undef VERIFY_OFFSET 6994 6995 #define VERIFY_OFFSET(member, offset) \ 6996 BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset) 6997 6998 VERIFY_OFFSET(dev_handle, 0x00); 6999 VERIFY_OFFSET(reserved1, 0x02); 7000 VERIFY_OFFSET(function, 0x03); 7001 VERIFY_OFFSET(reserved2, 0x04); 7002 VERIFY_OFFSET(err_info, 0x0C); 7003 VERIFY_OFFSET(reserved3, 0x10); 7004 VERIFY_OFFSET(err_info_len, 0x12); 7005 VERIFY_OFFSET(reserved4, 0x13); 7006 VERIFY_OFFSET(sgl_offset, 0x14); 7007 VERIFY_OFFSET(reserved5, 0x15); 7008 VERIFY_OFFSET(transfer_len, 0x1C); 7009 VERIFY_OFFSET(reserved6, 0x20); 7010 VERIFY_OFFSET(io_flags, 0x24); 7011 VERIFY_OFFSET(reserved7, 0x26); 7012 VERIFY_OFFSET(LUN, 0x34); 7013 VERIFY_OFFSET(control, 0x3C); 7014 VERIFY_OFFSET(CDB, 0x40); 7015 VERIFY_OFFSET(reserved8, 0x50); 7016 VERIFY_OFFSET(host_context_flags, 0x60); 7017 VERIFY_OFFSET(timeout_sec, 0x62); 7018 VERIFY_OFFSET(ReplyQueue, 0x64); 7019 
VERIFY_OFFSET(reserved9, 0x65); 7020 VERIFY_OFFSET(Tag, 0x68); 7021 VERIFY_OFFSET(host_addr, 0x70); 7022 VERIFY_OFFSET(CISS_LUN, 0x78); 7023 VERIFY_OFFSET(SG, 0x78 + 8); 7024 #undef VERIFY_OFFSET 7025 } 7026 7027 module_init(hpsa_init); 7028 module_exit(hpsa_cleanup); 7029