/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
	"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
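
/*
 * Usage note (added commentary, not in the original source): both
 * parameters can be given at load time, e.g.
 *
 *	modprobe hpsa hpsa_simple_mode=1 hpsa_allow_any=1
 *
 * and, since they are declared S_IWUSR, they are also exposed writable
 * under /sys/module/hpsa/parameters/ after the module is loaded.
 */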

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array", &SA5_access},
	{0x21BE103C, "Smart Array", &SA5_access},
	{0x21BF103C, "Smart Array", &SA5_access},
	{0x21C0103C, "Smart Array", &SA5_access},
	{0x21C1103C, "Smart Array", &SA5_access},
	{0x21C2103C, "Smart Array", &SA5_access},
	{0x21C3103C, "Smart Array", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array", &SA5_access},
	{0x21C6103C, "Smart Array", &SA5_access},
	{0x21C7103C, "Smart Array", &SA5_access},
	{0x21C8103C, "Smart Array", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array", &SA5_access},
	{0x21CB103C, "Smart Array", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart Array", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
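
/*
 * Worked example (added, not in the original source): each board_id in
 * products[] above packs the PCI subsystem device ID into the high 16
 * bits and the subsystem vendor ID into the low 16 bits, so the Smart
 * Array P212 (subsystem 0x103C:0x3241 in hpsa_pci_device_id[]) matches
 * the 0x3241103C entry: (0x3241 << 16) | 0x103C == 0x3241103C.
 * A sketch of that composition (hpsa_example_board_id is hypothetical
 * and unused; the driver's real lookup is hpsa_lookup_board_id()):
 */
static inline u32 hpsa_example_board_id(struct pci_dev *pdev)
{
	return ((u32) pdev->subsystem_device << 16) | pdev->subsystem_vendor;
}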

static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			HPSA "%d: LUN failure detected\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			HPSA "%d: report LUN data changed\n", h->ctlr);
		/*
		 * Note: this REPORT_LUNS_CHANGED condition only occurs on
		 * the external target (array) devices.
		 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, HPSA "%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, HPSA "%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}
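
/*
 * Usage sketch (added, not in the original source): the handler above
 * is reached through the generic scsi_host sysfs tree, e.g. (host
 * number assumed):
 *
 *	echo 1 > /sys/class/scsi_host/host0/hp_ssd_smart_path_status
 *
 * The writer must hold both CAP_SYS_ADMIN and CAP_SYS_RAWIO, and any
 * input that sscanf() cannot parse as a decimal integer is rejected
 * with -EINVAL.
 */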

static ssize_t host_store_raid_offload_debug(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}

static ssize_t host_store_rescan(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
		atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ? "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}
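
/*
 * Worked example (added commentary, not in the original source): the
 * top two bits of byte 3 of an 8-byte CISS LUN address carry the
 * addressing mode, and 0x40 (binary 01xxxxxx) means logical volume
 * addressing, so:
 *
 *	unsigned char logical[8]  = { 0, 0, 0, 0x40, ... };  -> returns 1
 *	unsigned char physical[8] = { 0, 0, 0, 0x00, ... };  -> returns 0
 */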

static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
		"%02X%02X%02X%02X%02X%02X%02X%02X"
		"%02X%02X%02X%02X%02X%02X%02X%02X\n",
		sn[0], sn[1], sn[2], sn[3],
		sn[4], sn[5], sn[6], sn[7],
		sn[8], sn[9], sn[10], sn[11],
		sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
	host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
	host_show_hp_ssd_smart_path_status,
	host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
	host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	NULL,
};

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs		= hpsa_sdev_attrs,
	.shost_attrs		= hpsa_shost_attrs,
	.max_sectors		= 8192,
	.no_write_same		= 1,
};

static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */
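
/*
 * Worked example (added, not in the original source): for a normal
 * performant-mode command whose block fetch table entry is 3,
 * set_performant_mode() below ORs the following into c->busaddr:
 *
 *	1 | (3 << 1) == 0x07	(bit 0 = 1, bits 1-3 = 3, bits 4-6 = 0)
 *
 * ioaccel1 uses the same pull-count encoding but additionally ORs in
 * IOACCEL1_BUSADDR_CMDTYPE so the controller can tell the two formats
 * apart on the shared submission register.
 */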

/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (likely(h->msix_vector > 0))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
	}
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
	struct CommandList *c)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 * - performant mode bit (bit 0)
	 * - pull count (bits 1-3)
	 * - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
		IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
	struct CommandList *c)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 * - performant mode bit not used in ioaccel mode 2
	 * - pull count (bits 0-3)
	 * - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should, so we dial down lockup detection during firmware flash and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
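/*
 * Note (added commentary, not in the original source): the intervals
 * are in jiffies, so they are HZ-independent in wall-clock terms:
 * 240 seconds during a flash versus the usual 30 seconds.
 * firmware_flash_in_progress is an atomic counter rather than a flag
 * so that, with overlapping flash commands, the short interval is only
 * restored when the last one completes (see atomic_dec_and_test()
 * below).
 */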
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
	struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
	struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c);
		break;
	default:
		set_performant_mode(h, c);
	}
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	h->access.submit_command(h, c);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *device,
	struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
		scsi_device_type(device->devtype), hostno,
		device->bus, device->target, device->lun);
	return 0;
}

/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed.  Careful about the ordering. */
	if (new_entry->offload_config && new_entry->offload_enabled) {
		/*
		 * if drive is newly offload_enabled, we want to copy the
		 * raid map data first.  If previously offload_enabled and
		 * offload_config were set, raid map data had better be
		 * the same as it was before.  if raid map data is changed
		 * then it had better be the case that
		 * h->dev[entry]->offload_enabled is currently 0.
		 */
		h->dev[entry]->raid_map = new_entry->raid_map;
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
		wmb(); /* ensure raid map updated prior to ->offload_enabled */
	}
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->offload_enabled = new_entry->offload_enabled;
	h->dev[entry]->queue_depth = new_entry->queue_depth;

	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}
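
/*
 * Illustrative sketch (added, not in the original source) of why the
 * wmb() in hpsa_scsi_update_entry() matters: a concurrent reader on
 * the I/O path conceptually does the mirror image,
 *
 *	if (dev->offload_enabled) {
 *		rmb();			(assumed pairing read barrier)
 *		use(dev->raid_map);	(must not see a stale map)
 *	}
 *
 * Publishing the new raid map before setting ->offload_enabled ensures
 * that a reader observing offload_enabled != 0 also observes the map
 * that goes with it.
 */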

/* Replace an entry in h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}

#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	if (dev1->queue_depth != dev2->queue_depth)
		return 1;
	return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}
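
/*
 * Usage summary (added, not in the original source): the four return
 * codes above drive the table reconciliation in adjust_hpsa_scsi_table():
 *
 *	DEVICE_NOT_FOUND -> hpsa_scsi_remove_entry() or hpsa_scsi_add_entry()
 *	DEVICE_CHANGED   -> hpsa_scsi_replace_entry()
 *	DEVICE_UPDATED   -> hpsa_scsi_update_entry()
 *	DEVICE_SAME      -> nothing to do
 */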

static void hpsa_monitor_offline_device(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
		return;
	}
	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}

/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}

/*
 * Figure the list of physical drive pointers for a logical drive with
 * raid offload configured.
 */
static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev[], int ndevices,
	struct hpsa_scsi_dev_t *logical_drive)
{
	struct raid_map_data *map = &logical_drive->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int i, j;
	int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
		le16_to_cpu(map->metadata_disks_per_row);
	int nraid_map_entries = le16_to_cpu(map->row_cnt) *
		le16_to_cpu(map->layout_map_count) *
		total_disks_per_row;
	int nphys_disk = le16_to_cpu(map->layout_map_count) *
		total_disks_per_row;
	int qdepth;

	if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
		nraid_map_entries = RAID_MAP_MAX_ENTRIES;

	qdepth = 0;
	for (i = 0; i < nraid_map_entries; i++) {
		logical_drive->phys_disk[i] = NULL;
		if (!logical_drive->offload_config)
			continue;
		for (j = 0; j < ndevices; j++) {
			if (dev[j]->devtype != TYPE_DISK)
				continue;
			if (is_logical_dev_addr_mode(dev[j]->scsi3addr))
				continue;
			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
				continue;

			logical_drive->phys_disk[i] = dev[j];
			if (i < nphys_disk)
				qdepth = min(h->nr_cmds, qdepth +
					logical_drive->phys_disk[i]->queue_depth);
			break;
		}

		/*
		 * This can happen if a physical drive is removed and
		 * the logical drive is degraded.  In that case, the RAID
		 * map data will refer to a physical disk which isn't actually
		 * present.  And in that case offload_enabled should already
		 * be 0, but we'll turn it off here just in case
		 */
		if (!logical_drive->phys_disk[i]) {
			logical_drive->offload_enabled = 0;
			logical_drive->queue_depth = h->nr_cmds;
		}
	}
	if (nraid_map_entries)
		/*
		 * This is correct for reads, too high for full stripe writes,
		 * way too high for partial stripe writes
		 */
		logical_drive->queue_depth = qdepth;
	else
		logical_drive->queue_depth = h->nr_cmds;
}

static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev[], int ndevices)
{
	int i;

	for (i = 0; i < ndevices; i++) {
		if (dev[i]->devtype != TYPE_DISK)
			continue;
		if (!is_logical_dev_addr_mode(dev[i]->scsi3addr))
			continue;
		hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
	}
}

static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */

	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;

		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
		 * as the SCSI mid-layer does not handle such devices well.
		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
		 * at 160Hz, and prevents the system from coming up.
		 */
		if (sd[i]->volume_offline) {
			hpsa_show_volume_status(h, sd[i]);
			dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
				h->scsi_host->host_no,
				sd[i]->bus, sd[i]->target, sd[i]->lun);
			continue;
		}

		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
			h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Monitor devices which are in one of several NOT READY states to be
	 * brought online later.  This must be done without holding h->devlock,
	 * so don't touch h->dev[]
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		if (sd[i]->volume_offline)
			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
	}

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * future cmds to this device will get selection
			 * timeout as if the device was gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				"for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (sd != NULL) {
		sdev->hostdata = sd;
		if (sd->queue_depth)
			scsi_change_queue_depth(sdev, sd->queue_depth);
		atomic_set(&sd->ioaccel_cmds_out, 0);
	}
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}

static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
		GFP_KERNEL);
	if (!h->cmd_sg_list) {
		dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
		return -ENOMEM;
	}
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
			h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i]) {
			dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
			goto clean;
		}
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}

static int hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;
	u32 chain_len;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
	chain_len = sizeof(*chain_sg) *
		(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
	chain_sg->Len = cpu_to_le32(chain_len);
	temp64 = pci_map_single(h->pdev, chain_block, chain_len,
		PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		chain_sg->Addr = cpu_to_le64(0);
		return -1;
	}
	chain_sg->Addr = cpu_to_le64(temp64);
	return 0;
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;

	if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
		le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
}
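
/*
 * Worked example (added, not in the original source): with
 * h->max_cmd_sg_entries == 32 and a command whose Header.SGTotal is 40,
 * hpsa_map_sg_chain_block() converts the last embedded descriptor
 * (SG[31]) into a chain pointer and DMA-maps a chain block of
 *
 *	sizeof(struct SGDescriptor) * (40 - 32)
 *
 * bytes at h->cmd_sg_list[c->cmdindex], which holds the descriptors
 * that did not fit in the command itself.
 */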

/* Decode the various types of errors on ioaccel2 path.
 * Return 1 for any error that should generate a RAID path retry.
 * Return 0 for errors that don't require a RAID path retry.
 */
static int handle_ioaccel_mode2_error(struct ctlr_info *h,
	struct CommandList *c,
	struct scsi_cmnd *cmd,
	struct io_accel2_cmd *c2)
{
	int data_len;
	int retry = 0;

	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_COMPLETE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
			dev_warn(&h->pdev->dev,
				"%s: task complete with check condition.\n",
				"HP SSD Smart Path");
			cmd->result |= SAM_STAT_CHECK_CONDITION;
			if (c2->error_data.data_present !=
					IOACCEL2_SENSE_DATA_PRESENT) {
				memset(cmd->sense_buffer, 0,
					SCSI_SENSE_BUFFERSIZE);
				break;
			}
			/* copy the sense data */
			data_len = c2->error_data.sense_data_len;
			if (data_len > SCSI_SENSE_BUFFERSIZE)
				data_len = SCSI_SENSE_BUFFERSIZE;
			if (data_len > sizeof(c2->error_data.sense_data_buff))
				data_len =
					sizeof(c2->error_data.sense_data_buff);
			memcpy(cmd->sense_buffer,
				c2->error_data.sense_data_buff, data_len);
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
			dev_warn(&h->pdev->dev,
				"%s: task complete with BUSY status.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
			dev_warn(&h->pdev->dev,
				"%s: task complete with reservation conflict.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
			/* Make scsi midlayer do unlimited retries */
			cmd->result = DID_IMM_RETRY << 16;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
			dev_warn(&h->pdev->dev,
				"%s: task complete with aborted status.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		default:
			dev_warn(&h->pdev->dev,
				"%s: task complete with unrecognized status: 0x%02x\n",
				"HP SSD Smart Path", c2->error_data.status);
			retry = 1;
			break;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_FAILURE:
		/* don't expect to get here. */
		dev_warn(&h->pdev->dev,
			"unexpected delivery or target failure, status = 0x%02x\n",
			c2->error_data.status);
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
		dev_warn(&h->pdev->dev, "task management function rejected.\n");
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: Unrecognized server response: 0x%02x\n",
			"HP SSD Smart Path",
			c2->error_data.serv_response);
		retry = 1;
		break;
	}

	return retry;	/* retry on raid path? */
}
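
/*
 * Summary (added commentary, not in the original source): every
 * abnormal completion above reports retry = 1 except TASK_COMP_GOOD,
 * TASK_COMP_SET_FULL (which becomes DID_IMM_RETRY instead),
 * TMF_COMPLETE, TMF_SUCCESS and TMF_WRONG_LUN, so nearly all ioaccel2
 * errors are pushed back down the normal RAID path.
 */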
1732 */ 1733 if (is_logical_dev_addr_mode(dev->scsi3addr) && 1734 c2->error_data.serv_response == 1735 IOACCEL2_SERV_RESPONSE_FAILURE) { 1736 if (c2->error_data.status == 1737 IOACCEL2_STATUS_SR_IOACCEL_DISABLED) 1738 dev->offload_enabled = 0; 1739 goto retry_cmd; 1740 } 1741 1742 if (handle_ioaccel_mode2_error(h, c, cmd, c2)) 1743 goto retry_cmd; 1744 1745 cmd_free(h, c); 1746 cmd->scsi_done(cmd); 1747 return; 1748 1749 retry_cmd: 1750 INIT_WORK(&c->work, hpsa_command_resubmit_worker); 1751 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work); 1752 } 1753 1754 static void complete_scsi_command(struct CommandList *cp) 1755 { 1756 struct scsi_cmnd *cmd; 1757 struct ctlr_info *h; 1758 struct ErrorInfo *ei; 1759 struct hpsa_scsi_dev_t *dev; 1760 1761 unsigned char sense_key; 1762 unsigned char asc; /* additional sense code */ 1763 unsigned char ascq; /* additional sense code qualifier */ 1764 unsigned long sense_data_size; 1765 1766 ei = cp->err_info; 1767 cmd = (struct scsi_cmnd *) cp->scsi_cmd; 1768 h = cp->h; 1769 dev = cmd->device->hostdata; 1770 1771 scsi_dma_unmap(cmd); /* undo the DMA mappings */ 1772 if ((cp->cmd_type == CMD_SCSI) && 1773 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries)) 1774 hpsa_unmap_sg_chain_block(h, cp); 1775 1776 cmd->result = (DID_OK << 16); /* host byte */ 1777 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ 1778 1779 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) 1780 atomic_dec(&cp->phys_disk->ioaccel_cmds_out); 1781 1782 if (cp->cmd_type == CMD_IOACCEL2) 1783 return process_ioaccel2_completion(h, cp, cmd, dev); 1784 1785 cmd->result |= ei->ScsiStatus; 1786 1787 scsi_set_resid(cmd, ei->ResidualCnt); 1788 if (ei->CommandStatus == 0) { 1789 if (cp->cmd_type == CMD_IOACCEL1) 1790 atomic_dec(&cp->phys_disk->ioaccel_cmds_out); 1791 cmd_free(h, cp); 1792 cmd->scsi_done(cmd); 1793 return; 1794 } 1795 1796 /* copy the sense data */ 1797 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo)) 1798 sense_data_size = SCSI_SENSE_BUFFERSIZE; 1799 else 1800 sense_data_size = sizeof(ei->SenseInfo); 1801 if (ei->SenseLen < sense_data_size) 1802 sense_data_size = ei->SenseLen; 1803 1804 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size); 1805 1806 /* For I/O accelerator commands, copy over some fields to the normal 1807 * CISS header used below for error handling. 1808 */ 1809 if (cp->cmd_type == CMD_IOACCEL1) { 1810 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; 1811 cp->Header.SGList = scsi_sg_count(cmd); 1812 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList); 1813 cp->Request.CDBLen = le16_to_cpu(c->io_flags) & 1814 IOACCEL1_IOFLAGS_CDBLEN_MASK; 1815 cp->Header.tag = c->tag; 1816 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8); 1817 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen); 1818 1819 /* Any RAID offload error results in retry which will use 1820 * the normal I/O path so the controller can handle whatever's 1821 * wrong. 
1822 */ 1823 if (is_logical_dev_addr_mode(dev->scsi3addr)) { 1824 if (ei->CommandStatus == CMD_IOACCEL_DISABLED) 1825 dev->offload_enabled = 0; 1826 INIT_WORK(&cp->work, hpsa_command_resubmit_worker); 1827 queue_work_on(raw_smp_processor_id(), 1828 h->resubmit_wq, &cp->work); 1829 return; 1830 } 1831 } 1832 1833 /* an error has occurred */ 1834 switch (ei->CommandStatus) { 1835 1836 case CMD_TARGET_STATUS: 1837 if (ei->ScsiStatus) { 1838 /* Get sense key */ 1839 sense_key = 0xf & ei->SenseInfo[2]; 1840 /* Get additional sense code */ 1841 asc = ei->SenseInfo[12]; 1842 /* Get addition sense code qualifier */ 1843 ascq = ei->SenseInfo[13]; 1844 } 1845 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { 1846 if (sense_key == ABORTED_COMMAND) { 1847 cmd->result |= DID_SOFT_ERROR << 16; 1848 break; 1849 } 1850 break; 1851 } 1852 /* Problem was not a check condition 1853 * Pass it up to the upper layers... 1854 */ 1855 if (ei->ScsiStatus) { 1856 dev_warn(&h->pdev->dev, "cp %p has status 0x%x " 1857 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " 1858 "Returning result: 0x%x\n", 1859 cp, ei->ScsiStatus, 1860 sense_key, asc, ascq, 1861 cmd->result); 1862 } else { /* scsi status is zero??? How??? */ 1863 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. " 1864 "Returning no connection.\n", cp), 1865 1866 /* Ordinarily, this case should never happen, 1867 * but there is a bug in some released firmware 1868 * revisions that allows it to happen if, for 1869 * example, a 4100 backplane loses power and 1870 * the tape drive is in it. We assume that 1871 * it's a fatal error of some kind because we 1872 * can't show that it wasn't. We will make it 1873 * look like selection timeout since that is 1874 * the most common reason for this to occur, 1875 * and it's severe enough. 1876 */ 1877 1878 cmd->result = DID_NO_CONNECT << 16; 1879 } 1880 break; 1881 1882 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ 1883 break; 1884 case CMD_DATA_OVERRUN: 1885 dev_warn(&h->pdev->dev, "cp %p has" 1886 " completed with data overrun " 1887 "reported\n", cp); 1888 break; 1889 case CMD_INVALID: { 1890 /* print_bytes(cp, sizeof(*cp), 1, 0); 1891 print_cmd(cp); */ 1892 /* We get CMD_INVALID if you address a non-existent device 1893 * instead of a selection timeout (no response). You will 1894 * see this if you yank out a drive, then try to access it. 1895 * This is kind of a shame because it means that any other 1896 * CMD_INVALID (e.g. driver bug) will get interpreted as a 1897 * missing target. 
*/ 1898 cmd->result = DID_NO_CONNECT << 16; 1899 } 1900 break; 1901 case CMD_PROTOCOL_ERR: 1902 cmd->result = DID_ERROR << 16; 1903 dev_warn(&h->pdev->dev, "cp %p has " 1904 "protocol error\n", cp); 1905 break; 1906 case CMD_HARDWARE_ERR: 1907 cmd->result = DID_ERROR << 16; 1908 dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp); 1909 break; 1910 case CMD_CONNECTION_LOST: 1911 cmd->result = DID_ERROR << 16; 1912 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp); 1913 break; 1914 case CMD_ABORTED: 1915 cmd->result = DID_ABORT << 16; 1916 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n", 1917 cp, ei->ScsiStatus); 1918 break; 1919 case CMD_ABORT_FAILED: 1920 cmd->result = DID_ERROR << 16; 1921 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp); 1922 break; 1923 case CMD_UNSOLICITED_ABORT: 1924 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */ 1925 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited " 1926 "abort\n", cp); 1927 break; 1928 case CMD_TIMEOUT: 1929 cmd->result = DID_TIME_OUT << 16; 1930 dev_warn(&h->pdev->dev, "cp %p timed out\n", cp); 1931 break; 1932 case CMD_UNABORTABLE: 1933 cmd->result = DID_ERROR << 16; 1934 dev_warn(&h->pdev->dev, "Command unabortable\n"); 1935 break; 1936 case CMD_IOACCEL_DISABLED: 1937 /* This only handles the direct pass-through case since RAID 1938 * offload is handled above. Just attempt a retry. 1939 */ 1940 cmd->result = DID_SOFT_ERROR << 16; 1941 dev_warn(&h->pdev->dev, 1942 "cp %p had HP SSD Smart Path error\n", cp); 1943 break; 1944 default: 1945 cmd->result = DID_ERROR << 16; 1946 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", 1947 cp, ei->CommandStatus); 1948 } 1949 cmd_free(h, cp); 1950 cmd->scsi_done(cmd); 1951 } 1952 1953 static void hpsa_pci_unmap(struct pci_dev *pdev, 1954 struct CommandList *c, int sg_used, int data_direction) 1955 { 1956 int i; 1957 1958 for (i = 0; i < sg_used; i++) 1959 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr), 1960 le32_to_cpu(c->SG[i].Len), 1961 data_direction); 1962 } 1963 1964 static int hpsa_map_one(struct pci_dev *pdev, 1965 struct CommandList *cp, 1966 unsigned char *buf, 1967 size_t buflen, 1968 int data_direction) 1969 { 1970 u64 addr64; 1971 1972 if (buflen == 0 || data_direction == PCI_DMA_NONE) { 1973 cp->Header.SGList = 0; 1974 cp->Header.SGTotal = cpu_to_le16(0); 1975 return 0; 1976 } 1977 1978 addr64 = pci_map_single(pdev, buf, buflen, data_direction); 1979 if (dma_mapping_error(&pdev->dev, addr64)) { 1980 /* Prevent subsequent unmap of something never mapped */ 1981 cp->Header.SGList = 0; 1982 cp->Header.SGTotal = cpu_to_le16(0); 1983 return -1; 1984 } 1985 cp->SG[0].Addr = cpu_to_le64(addr64); 1986 cp->SG[0].Len = cpu_to_le32(buflen); 1987 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */ 1988 cp->Header.SGList = 1; /* no.
SGs contig in this cmd */ 1989 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */ 1990 return 0; 1991 } 1992 1993 static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, 1994 struct CommandList *c) 1995 { 1996 DECLARE_COMPLETION_ONSTACK(wait); 1997 1998 c->waiting = &wait; 1999 enqueue_cmd_and_start_io(h, c); 2000 wait_for_completion(&wait); 2001 } 2002 2003 static u32 lockup_detected(struct ctlr_info *h) 2004 { 2005 int cpu; 2006 u32 rc, *lockup_detected; 2007 2008 cpu = get_cpu(); 2009 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); 2010 rc = *lockup_detected; 2011 put_cpu(); 2012 return rc; 2013 } 2014 2015 static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h, 2016 struct CommandList *c) 2017 { 2018 /* If controller lockup detected, fake a hardware error. */ 2019 if (unlikely(lockup_detected(h))) 2020 c->err_info->CommandStatus = CMD_HARDWARE_ERR; 2021 else 2022 hpsa_scsi_do_simple_cmd_core(h, c); 2023 } 2024 2025 #define MAX_DRIVER_CMD_RETRIES 25 2026 static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, 2027 struct CommandList *c, int data_direction) 2028 { 2029 int backoff_time = 10, retry_count = 0; 2030 2031 do { 2032 memset(c->err_info, 0, sizeof(*c->err_info)); 2033 hpsa_scsi_do_simple_cmd_core(h, c); 2034 retry_count++; 2035 if (retry_count > 3) { 2036 msleep(backoff_time); 2037 if (backoff_time < 1000) 2038 backoff_time *= 2; 2039 } 2040 } while ((check_for_unit_attention(h, c) || 2041 check_for_busy(h, c)) && 2042 retry_count <= MAX_DRIVER_CMD_RETRIES); 2043 hpsa_pci_unmap(h->pdev, c, 1, data_direction); 2044 } 2045 2046 static void hpsa_print_cmd(struct ctlr_info *h, char *txt, 2047 struct CommandList *c) 2048 { 2049 const u8 *cdb = c->Request.CDB; 2050 const u8 *lun = c->Header.LUN.LunAddrBytes; 2051 2052 dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x" 2053 " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", 2054 txt, lun[0], lun[1], lun[2], lun[3], 2055 lun[4], lun[5], lun[6], lun[7], 2056 cdb[0], cdb[1], cdb[2], cdb[3], 2057 cdb[4], cdb[5], cdb[6], cdb[7], 2058 cdb[8], cdb[9], cdb[10], cdb[11], 2059 cdb[12], cdb[13], cdb[14], cdb[15]); 2060 } 2061 2062 static void hpsa_scsi_interpret_error(struct ctlr_info *h, 2063 struct CommandList *cp) 2064 { 2065 const struct ErrorInfo *ei = cp->err_info; 2066 struct device *d = &cp->h->pdev->dev; 2067 const u8 *sd = ei->SenseInfo; 2068 2069 switch (ei->CommandStatus) { 2070 case CMD_TARGET_STATUS: 2071 hpsa_print_cmd(h, "SCSI status", cp); 2072 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) 2073 dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n", 2074 sd[2] & 0x0f, sd[12], sd[13]); 2075 else 2076 dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus); 2077 if (ei->ScsiStatus == 0) 2078 dev_warn(d, "SCSI status is abnormally zero. " 2079 "(probably indicates selection timeout " 2080 "reported incorrectly due to a known " 2081 "firmware bug, circa July, 2001.)\n"); 2082 break; 2083 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ 2084 break; 2085 case CMD_DATA_OVERRUN: 2086 hpsa_print_cmd(h, "overrun condition", cp); 2087 break; 2088 case CMD_INVALID: { 2089 /* controller unfortunately reports SCSI passthru's 2090 * to non-existent targets as invalid commands. 
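 * (This is the same CMD_INVALID quirk that complete_scsi_command()
 * handles above; here it only rates a diagnostic message.)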
2091 */ 2092 hpsa_print_cmd(h, "invalid command", cp); 2093 dev_warn(d, "probably means device no longer present\n"); 2094 } 2095 break; 2096 case CMD_PROTOCOL_ERR: 2097 hpsa_print_cmd(h, "protocol error", cp); 2098 break; 2099 case CMD_HARDWARE_ERR: 2100 hpsa_print_cmd(h, "hardware error", cp); 2101 break; 2102 case CMD_CONNECTION_LOST: 2103 hpsa_print_cmd(h, "connection lost", cp); 2104 break; 2105 case CMD_ABORTED: 2106 hpsa_print_cmd(h, "aborted", cp); 2107 break; 2108 case CMD_ABORT_FAILED: 2109 hpsa_print_cmd(h, "abort failed", cp); 2110 break; 2111 case CMD_UNSOLICITED_ABORT: 2112 hpsa_print_cmd(h, "unsolicited abort", cp); 2113 break; 2114 case CMD_TIMEOUT: 2115 hpsa_print_cmd(h, "timed out", cp); 2116 break; 2117 case CMD_UNABORTABLE: 2118 hpsa_print_cmd(h, "unabortable", cp); 2119 break; 2120 default: 2121 hpsa_print_cmd(h, "unknown status", cp); 2122 dev_warn(d, "Unknown command status %x\n", 2123 ei->CommandStatus); 2124 } 2125 } 2126 2127 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, 2128 u16 page, unsigned char *buf, 2129 unsigned char bufsize) 2130 { 2131 int rc = IO_OK; 2132 struct CommandList *c; 2133 struct ErrorInfo *ei; 2134 2135 c = cmd_alloc(h); 2136 2137 if (c == NULL) { 2138 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); 2139 return -ENOMEM; 2140 } 2141 2142 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, 2143 page, scsi3addr, TYPE_CMD)) { 2144 rc = -1; 2145 goto out; 2146 } 2147 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 2148 ei = c->err_info; 2149 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 2150 hpsa_scsi_interpret_error(h, c); 2151 rc = -1; 2152 } 2153 out: 2154 cmd_free(h, c); 2155 return rc; 2156 } 2157 2158 static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h, 2159 unsigned char *scsi3addr, unsigned char page, 2160 struct bmic_controller_parameters *buf, size_t bufsize) 2161 { 2162 int rc = IO_OK; 2163 struct CommandList *c; 2164 struct ErrorInfo *ei; 2165 2166 c = cmd_alloc(h); 2167 if (c == NULL) { /* trouble... */ 2168 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); 2169 return -ENOMEM; 2170 } 2171 2172 if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize, 2173 page, scsi3addr, TYPE_CMD)) { 2174 rc = -1; 2175 goto out; 2176 } 2177 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 2178 ei = c->err_info; 2179 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 2180 hpsa_scsi_interpret_error(h, c); 2181 rc = -1; 2182 } 2183 out: 2184 cmd_free(h, c); 2185 return rc; 2186 } 2187 2188 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, 2189 u8 reset_type) 2190 { 2191 int rc = IO_OK; 2192 struct CommandList *c; 2193 struct ErrorInfo *ei; 2194 2195 c = cmd_alloc(h); 2196 2197 if (c == NULL) { /* trouble... */ 2198 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); 2199 return -ENOMEM; 2200 } 2201 2202 /* fill_cmd can't fail here, no data buffer to map. */ 2203 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, 2204 scsi3addr, TYPE_MSG); 2205 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */ 2206 hpsa_scsi_do_simple_cmd_core(h, c); 2207 /* no unmap needed here because no data xfer. 
*/ 2208 2209 ei = c->err_info; 2210 if (ei->CommandStatus != 0) { 2211 hpsa_scsi_interpret_error(h, c); 2212 rc = -1; 2213 } 2214 cmd_free(h, c); 2215 return rc; 2216 } 2217 2218 static void hpsa_get_raid_level(struct ctlr_info *h, 2219 unsigned char *scsi3addr, unsigned char *raid_level) 2220 { 2221 int rc; 2222 unsigned char *buf; 2223 2224 *raid_level = RAID_UNKNOWN; 2225 buf = kzalloc(64, GFP_KERNEL); 2226 if (!buf) 2227 return; 2228 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64); 2229 if (rc == 0) 2230 *raid_level = buf[8]; 2231 if (*raid_level > RAID_UNKNOWN) 2232 *raid_level = RAID_UNKNOWN; 2233 kfree(buf); 2234 return; 2235 } 2236 2237 #define HPSA_MAP_DEBUG 2238 #ifdef HPSA_MAP_DEBUG 2239 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc, 2240 struct raid_map_data *map_buff) 2241 { 2242 struct raid_map_disk_data *dd = &map_buff->data[0]; 2243 int map, row, col; 2244 u16 map_cnt, row_cnt, disks_per_row; 2245 2246 if (rc != 0) 2247 return; 2248 2249 /* Show details only if debugging has been activated. */ 2250 if (h->raid_offload_debug < 2) 2251 return; 2252 2253 dev_info(&h->pdev->dev, "structure_size = %u\n", 2254 le32_to_cpu(map_buff->structure_size)); 2255 dev_info(&h->pdev->dev, "volume_blk_size = %u\n", 2256 le32_to_cpu(map_buff->volume_blk_size)); 2257 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n", 2258 le64_to_cpu(map_buff->volume_blk_cnt)); 2259 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n", 2260 map_buff->phys_blk_shift); 2261 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n", 2262 map_buff->parity_rotation_shift); 2263 dev_info(&h->pdev->dev, "strip_size = %u\n", 2264 le16_to_cpu(map_buff->strip_size)); 2265 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n", 2266 le64_to_cpu(map_buff->disk_starting_blk)); 2267 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n", 2268 le64_to_cpu(map_buff->disk_blk_cnt)); 2269 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n", 2270 le16_to_cpu(map_buff->data_disks_per_row)); 2271 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n", 2272 le16_to_cpu(map_buff->metadata_disks_per_row)); 2273 dev_info(&h->pdev->dev, "row_cnt = %u\n", 2274 le16_to_cpu(map_buff->row_cnt)); 2275 dev_info(&h->pdev->dev, "layout_map_count = %u\n", 2276 le16_to_cpu(map_buff->layout_map_count)); 2277 dev_info(&h->pdev->dev, "flags = 0x%x\n", 2278 le16_to_cpu(map_buff->flags)); 2279 dev_info(&h->pdev->dev, "encryption = %s\n", 2280 le16_to_cpu(map_buff->flags) & 2281 RAID_MAP_FLAG_ENCRYPT_ON ?
"ON" : "OFF"); 2282 dev_info(&h->pdev->dev, "dekindex = %u\n", 2283 le16_to_cpu(map_buff->dekindex)); 2284 map_cnt = le16_to_cpu(map_buff->layout_map_count); 2285 for (map = 0; map < map_cnt; map++) { 2286 dev_info(&h->pdev->dev, "Map%u:\n", map); 2287 row_cnt = le16_to_cpu(map_buff->row_cnt); 2288 for (row = 0; row < row_cnt; row++) { 2289 dev_info(&h->pdev->dev, " Row%u:\n", row); 2290 disks_per_row = 2291 le16_to_cpu(map_buff->data_disks_per_row); 2292 for (col = 0; col < disks_per_row; col++, dd++) 2293 dev_info(&h->pdev->dev, 2294 " D%02u: h=0x%04x xor=%u,%u\n", 2295 col, dd->ioaccel_handle, 2296 dd->xor_mult[0], dd->xor_mult[1]); 2297 disks_per_row = 2298 le16_to_cpu(map_buff->metadata_disks_per_row); 2299 for (col = 0; col < disks_per_row; col++, dd++) 2300 dev_info(&h->pdev->dev, 2301 " M%02u: h=0x%04x xor=%u,%u\n", 2302 col, dd->ioaccel_handle, 2303 dd->xor_mult[0], dd->xor_mult[1]); 2304 } 2305 } 2306 } 2307 #else 2308 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h, 2309 __attribute__((unused)) int rc, 2310 __attribute__((unused)) struct raid_map_data *map_buff) 2311 { 2312 } 2313 #endif 2314 2315 static int hpsa_get_raid_map(struct ctlr_info *h, 2316 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) 2317 { 2318 int rc = 0; 2319 struct CommandList *c; 2320 struct ErrorInfo *ei; 2321 2322 c = cmd_alloc(h); 2323 if (c == NULL) { 2324 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); 2325 return -ENOMEM; 2326 } 2327 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, 2328 sizeof(this_device->raid_map), 0, 2329 scsi3addr, TYPE_CMD)) { 2330 dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n"); 2331 cmd_free(h, c); 2332 return -ENOMEM; 2333 } 2334 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 2335 ei = c->err_info; 2336 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 2337 hpsa_scsi_interpret_error(h, c); 2338 cmd_free(h, c); 2339 return -1; 2340 } 2341 cmd_free(h, c); 2342 2343 /* @todo in the future, dynamically allocate RAID map memory */ 2344 if (le32_to_cpu(this_device->raid_map.structure_size) > 2345 sizeof(this_device->raid_map)) { 2346 dev_warn(&h->pdev->dev, "RAID map size is too large!\n"); 2347 rc = -1; 2348 } 2349 hpsa_debug_map_buff(h, rc, &this_device->raid_map); 2350 return rc; 2351 } 2352 2353 static int hpsa_bmic_id_physical_device(struct ctlr_info *h, 2354 unsigned char scsi3addr[], u16 bmic_device_index, 2355 struct bmic_identify_physical_device *buf, size_t bufsize) 2356 { 2357 int rc = IO_OK; 2358 struct CommandList *c; 2359 struct ErrorInfo *ei; 2360 2361 c = cmd_alloc(h); 2362 rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize, 2363 0, RAID_CTLR_LUNID, TYPE_CMD); 2364 if (rc) 2365 goto out; 2366 2367 c->Request.CDB[2] = bmic_device_index & 0xff; 2368 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; 2369 2370 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 2371 ei = c->err_info; 2372 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 2373 hpsa_scsi_interpret_error(h, c); 2374 rc = -1; 2375 } 2376 out: 2377 cmd_free(h, c); 2378 return rc; 2379 } 2380 2381 static int hpsa_vpd_page_supported(struct ctlr_info *h, 2382 unsigned char scsi3addr[], u8 page) 2383 { 2384 int rc; 2385 int i; 2386 int pages; 2387 unsigned char *buf, bufsize; 2388 2389 buf = kzalloc(256, GFP_KERNEL); 2390 if (!buf) 2391 return 0; 2392 2393 /* Get the size of the page list first */ 2394 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 2395 
VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, 2396 buf, HPSA_VPD_HEADER_SZ); 2397 if (rc != 0) 2398 goto exit_unsupported; 2399 pages = buf[3]; 2400 if ((pages + HPSA_VPD_HEADER_SZ) <= 255) 2401 bufsize = pages + HPSA_VPD_HEADER_SZ; 2402 else 2403 bufsize = 255; 2404 2405 /* Get the whole VPD page list */ 2406 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 2407 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, 2408 buf, bufsize); 2409 if (rc != 0) 2410 goto exit_unsupported; 2411 2412 pages = buf[3]; 2413 for (i = 1; i <= pages; i++) 2414 if (buf[3 + i] == page) 2415 goto exit_supported; 2416 exit_unsupported: 2417 kfree(buf); 2418 return 0; 2419 exit_supported: 2420 kfree(buf); 2421 return 1; 2422 } 2423 2424 static void hpsa_get_ioaccel_status(struct ctlr_info *h, 2425 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) 2426 { 2427 int rc; 2428 unsigned char *buf; 2429 u8 ioaccel_status; 2430 2431 this_device->offload_config = 0; 2432 this_device->offload_enabled = 0; 2433 2434 buf = kzalloc(64, GFP_KERNEL); 2435 if (!buf) 2436 return; 2437 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS)) 2438 goto out; 2439 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 2440 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64); 2441 if (rc != 0) 2442 goto out; 2443 2444 #define IOACCEL_STATUS_BYTE 4 2445 #define OFFLOAD_CONFIGURED_BIT 0x01 2446 #define OFFLOAD_ENABLED_BIT 0x02 2447 ioaccel_status = buf[IOACCEL_STATUS_BYTE]; 2448 this_device->offload_config = 2449 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); 2450 if (this_device->offload_config) { 2451 this_device->offload_enabled = 2452 !!(ioaccel_status & OFFLOAD_ENABLED_BIT); 2453 if (hpsa_get_raid_map(h, scsi3addr, this_device)) 2454 this_device->offload_enabled = 0; 2455 } 2456 out: 2457 kfree(buf); 2458 return; 2459 } 2460 2461 /* Get the device id from inquiry page 0x83 */ 2462 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr, 2463 unsigned char *device_id, int buflen) 2464 { 2465 int rc; 2466 unsigned char *buf; 2467 2468 if (buflen > 16) 2469 buflen = 16; 2470 buf = kzalloc(64, GFP_KERNEL); 2471 if (!buf) 2472 return -ENOMEM; 2473 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64); 2474 if (rc == 0) 2475 memcpy(device_id, &buf[8], buflen); 2476 kfree(buf); 2477 return rc != 0; 2478 } 2479 2480 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, 2481 void *buf, int bufsize, 2482 int extended_response) 2483 { 2484 int rc = IO_OK; 2485 struct CommandList *c; 2486 unsigned char scsi3addr[8]; 2487 struct ErrorInfo *ei; 2488 2489 c = cmd_alloc(h); 2490 if (c == NULL) { /* trouble... */ 2491 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); 2492 return -1; 2493 } 2494 /* address the controller */ 2495 memset(scsi3addr, 0, sizeof(scsi3addr)); 2496 if (fill_cmd(c, logical ? 
HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, 2497 buf, bufsize, 0, scsi3addr, TYPE_CMD)) { 2498 rc = -1; 2499 goto out; 2500 } 2501 if (extended_response) 2502 c->Request.CDB[1] = extended_response; 2503 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 2504 ei = c->err_info; 2505 if (ei->CommandStatus != 0 && 2506 ei->CommandStatus != CMD_DATA_UNDERRUN) { 2507 hpsa_scsi_interpret_error(h, c); 2508 rc = -1; 2509 } else { 2510 struct ReportLUNdata *rld = buf; 2511 2512 if (rld->extended_response_flag != extended_response) { 2513 dev_err(&h->pdev->dev, 2514 "report luns requested format %u, got %u\n", 2515 extended_response, 2516 rld->extended_response_flag); 2517 rc = -1; 2518 } 2519 } 2520 out: 2521 cmd_free(h, c); 2522 return rc; 2523 } 2524 2525 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, 2526 struct ReportExtendedLUNdata *buf, int bufsize) 2527 { 2528 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, 2529 HPSA_REPORT_PHYS_EXTENDED); 2530 } 2531 2532 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h, 2533 struct ReportLUNdata *buf, int bufsize) 2534 { 2535 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0); 2536 } 2537 2538 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device, 2539 int bus, int target, int lun) 2540 { 2541 device->bus = bus; 2542 device->target = target; 2543 device->lun = lun; 2544 } 2545 2546 /* Use VPD inquiry to get details of volume status */ 2547 static int hpsa_get_volume_status(struct ctlr_info *h, 2548 unsigned char scsi3addr[]) 2549 { 2550 int rc; 2551 int status; 2552 int size; 2553 unsigned char *buf; 2554 2555 buf = kzalloc(64, GFP_KERNEL); 2556 if (!buf) 2557 return HPSA_VPD_LV_STATUS_UNSUPPORTED; 2558 2559 /* Does controller have VPD for logical volume status? */ 2560 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) 2561 goto exit_failed; 2562 2563 /* Get the size of the VPD return buffer */ 2564 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, 2565 buf, HPSA_VPD_HEADER_SZ); 2566 if (rc != 0) 2567 goto exit_failed; 2568 size = buf[3]; 2569 2570 /* Now get the whole VPD buffer */ 2571 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, 2572 buf, size + HPSA_VPD_HEADER_SZ); 2573 if (rc != 0) 2574 goto exit_failed; 2575 status = buf[4]; /* status byte */ 2576 2577 kfree(buf); 2578 return status; 2579 exit_failed: 2580 kfree(buf); 2581 return HPSA_VPD_LV_STATUS_UNSUPPORTED; 2582 } 2583 2584 /* Determine offline status of a volume. 
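 * Works by sending a TEST UNIT READY; if the LUN answers NOT READY
 * with ASC 0x04, the LV status VPD page supplies the specific reason.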
2585 * Return either: 2586 * 0 (not offline) 2587 * 0xff (offline for unknown reasons) 2588 * # (integer code indicating one of several NOT READY states 2589 * describing why a volume is to be kept offline) 2590 */ 2591 static int hpsa_volume_offline(struct ctlr_info *h, 2592 unsigned char scsi3addr[]) 2593 { 2594 struct CommandList *c; 2595 unsigned char *sense, sense_key, asc, ascq; 2596 int ldstat = 0; 2597 u16 cmd_status; 2598 u8 scsi_status; 2599 #define ASC_LUN_NOT_READY 0x04 2600 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04 2601 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02 2602 2603 c = cmd_alloc(h); 2604 if (!c) 2605 return 0; 2606 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); 2607 hpsa_scsi_do_simple_cmd_core(h, c); 2608 sense = c->err_info->SenseInfo; 2609 sense_key = sense[2]; 2610 asc = sense[12]; 2611 ascq = sense[13]; 2612 cmd_status = c->err_info->CommandStatus; 2613 scsi_status = c->err_info->ScsiStatus; 2614 cmd_free(h, c); 2615 /* Is the volume 'not ready'? */ 2616 if (cmd_status != CMD_TARGET_STATUS || 2617 scsi_status != SAM_STAT_CHECK_CONDITION || 2618 sense_key != NOT_READY || 2619 asc != ASC_LUN_NOT_READY) { 2620 return 0; 2621 } 2622 2623 /* Determine the reason for not ready state */ 2624 ldstat = hpsa_get_volume_status(h, scsi3addr); 2625 2626 /* Keep volume offline in certain cases: */ 2627 switch (ldstat) { 2628 case HPSA_LV_UNDERGOING_ERASE: 2629 case HPSA_LV_UNDERGOING_RPI: 2630 case HPSA_LV_PENDING_RPI: 2631 case HPSA_LV_ENCRYPTED_NO_KEY: 2632 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: 2633 case HPSA_LV_UNDERGOING_ENCRYPTION: 2634 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING: 2635 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: 2636 return ldstat; 2637 case HPSA_VPD_LV_STATUS_UNSUPPORTED: 2638 /* If VPD status page isn't available, 2639 * use ASC/ASCQ to determine state 2640 */ 2641 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) || 2642 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ)) 2643 return ldstat; 2644 break; 2645 default: 2646 break; 2647 } 2648 return 0; 2649 } 2650 2651 static int hpsa_update_device_info(struct ctlr_info *h, 2652 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, 2653 unsigned char *is_OBDR_device) 2654 { 2655 2656 #define OBDR_SIG_OFFSET 43 2657 #define OBDR_TAPE_SIG "$DR-10" 2658 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1) 2659 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN) 2660 2661 unsigned char *inq_buff; 2662 unsigned char *obdr_sig; 2663 2664 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); 2665 if (!inq_buff) 2666 goto bail_out; 2667 2668 /* Do an inquiry to the device to see what it is. 
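 * Standard inquiry fields consumed below: byte 0, bits 0-4 =
 * peripheral device type; bytes 8-15 = vendor; bytes 16-31 = model;
 * and bytes 43-48 may carry the "$DR-10" OBDR signature.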
*/ 2669 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, 2670 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { 2671 /* Inquiry failed (msg printed already) */ 2672 dev_err(&h->pdev->dev, 2673 "hpsa_update_device_info: inquiry failed\n"); 2674 goto bail_out; 2675 } 2676 2677 this_device->devtype = (inq_buff[0] & 0x1f); 2678 memcpy(this_device->scsi3addr, scsi3addr, 8); 2679 memcpy(this_device->vendor, &inq_buff[8], 2680 sizeof(this_device->vendor)); 2681 memcpy(this_device->model, &inq_buff[16], 2682 sizeof(this_device->model)); 2683 memset(this_device->device_id, 0, 2684 sizeof(this_device->device_id)); 2685 hpsa_get_device_id(h, scsi3addr, this_device->device_id, 2686 sizeof(this_device->device_id)); 2687 2688 if (this_device->devtype == TYPE_DISK && 2689 is_logical_dev_addr_mode(scsi3addr)) { 2690 int volume_offline; 2691 2692 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); 2693 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) 2694 hpsa_get_ioaccel_status(h, scsi3addr, this_device); 2695 volume_offline = hpsa_volume_offline(h, scsi3addr); 2696 if (volume_offline < 0 || volume_offline > 0xff) 2697 volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED; 2698 this_device->volume_offline = volume_offline & 0xff; 2699 } else { 2700 this_device->raid_level = RAID_UNKNOWN; 2701 this_device->offload_config = 0; 2702 this_device->offload_enabled = 0; 2703 this_device->volume_offline = 0; 2704 this_device->queue_depth = h->nr_cmds; 2705 } 2706 2707 if (is_OBDR_device) { 2708 /* See if this is a One-Button-Disaster-Recovery device 2709 * by looking for "$DR-10" at offset 43 in inquiry data. 2710 */ 2711 obdr_sig = &inq_buff[OBDR_SIG_OFFSET]; 2712 *is_OBDR_device = (this_device->devtype == TYPE_ROM && 2713 strncmp(obdr_sig, OBDR_TAPE_SIG, 2714 OBDR_SIG_LEN) == 0); 2715 } 2716 2717 kfree(inq_buff); 2718 return 0; 2719 2720 bail_out: 2721 kfree(inq_buff); 2722 return 1; 2723 } 2724 2725 static unsigned char *ext_target_model[] = { 2726 "MSA2012", 2727 "MSA2024", 2728 "MSA2312", 2729 "MSA2324", 2730 "P2000 G3 SAS", 2731 "MSA 2040 SAS", 2732 NULL, 2733 }; 2734 2735 static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) 2736 { 2737 int i; 2738 2739 for (i = 0; ext_target_model[i]; i++) 2740 if (strncmp(device->model, ext_target_model[i], 2741 strlen(ext_target_model[i])) == 0) 2742 return 1; 2743 return 0; 2744 } 2745 2746 /* Helper function to assign bus, target, lun mapping of devices. 2747 * Puts non-external target logical volumes on bus 0, external target logical 2748 * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3. 2749 * Logical drive target and lun are assigned at this time, but 2750 * physical device lun and target assignment are deferred (assigned 2751 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
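 * Worked example (illustrative lunids): a plain logical volume with
 * lunid 0x00000002 lands on bus 0, target 0, lun 2, while an MSA2xxx
 * logical volume with lunid 0x00050001 lands on bus 1, target 5,
 * lun 1.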
2752 */ 2753 static void figure_bus_target_lun(struct ctlr_info *h, 2754 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device) 2755 { 2756 u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes)); 2757 2758 if (!is_logical_dev_addr_mode(lunaddrbytes)) { 2759 /* physical device, target and lun filled in later */ 2760 if (is_hba_lunid(lunaddrbytes)) 2761 hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff); 2762 else 2763 /* defer target, lun assignment for physical devices */ 2764 hpsa_set_bus_target_lun(device, 2, -1, -1); 2765 return; 2766 } 2767 /* It's a logical device */ 2768 if (is_ext_target(h, device)) { 2769 /* external target: put logicals on bus 1 2770 * and use the target/lun numbers the box 2771 * reports; any other smart array: bus 0, target 0, lun matches lunid 2772 */ 2773 hpsa_set_bus_target_lun(device, 2774 1, (lunid >> 16) & 0x3fff, lunid & 0x00ff); 2775 return; 2776 } 2777 hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff); 2778 } 2779 2780 /* 2781 * If there is no lun 0 on a target, Linux won't find any devices. 2782 * For the external targets (arrays), we have to manually detect the enclosure 2783 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report 2784 * it for some reason. *tmpdevice is the target we're adding, 2785 * this_device is a pointer into the current element of currentsd[] 2786 * that we're building up in update_scsi_devices(), below. 2787 * lunzerobits is a bitmap that tracks which targets already have a 2788 * lun 0 assigned. 2789 * Returns 1 if an enclosure was added, 0 if not. 2790 */ 2791 static int add_ext_target_dev(struct ctlr_info *h, 2792 struct hpsa_scsi_dev_t *tmpdevice, 2793 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes, 2794 unsigned long lunzerobits[], int *n_ext_target_devs) 2795 { 2796 unsigned char scsi3addr[8]; 2797 2798 if (test_bit(tmpdevice->target, lunzerobits)) 2799 return 0; /* There is already a lun 0 on this target. */ 2800 2801 if (!is_logical_dev_addr_mode(lunaddrbytes)) 2802 return 0; /* It's the logical targets that may lack lun 0. */ 2803 2804 if (!is_ext_target(h, tmpdevice)) 2805 return 0; /* Only external target devices have this problem. */ 2806 2807 if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */ 2808 return 0; 2809 2810 memset(scsi3addr, 0, 8); 2811 scsi3addr[3] = tmpdevice->target; 2812 if (is_hba_lunid(scsi3addr)) 2813 return 0; /* Don't add the RAID controller here. */ 2814 2815 if (is_scsi_rev_5(h)) 2816 return 0; /* p1210m doesn't need to do this. */ 2817 2818 if (*n_ext_target_devs >= MAX_EXT_TARGETS) { 2819 dev_warn(&h->pdev->dev, "Maximum number of external " 2820 "target devices exceeded. Check your hardware " 2821 "configuration.\n"); 2822 return 0; 2823 } 2824 2825 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL)) 2826 return 0; 2827 (*n_ext_target_devs)++; 2828 hpsa_set_bus_target_lun(this_device, 2829 tmpdevice->bus, tmpdevice->target, 0); 2830 set_bit(tmpdevice->target, lunzerobits); 2831 return 1; 2832 } 2833 2834 /* 2835 * Get address of physical disk used for an ioaccel2 mode command: 2836 * 1. Extract ioaccel2 handle from the command. 2837 * 2. Find a matching ioaccel2 handle from list of physical disks. 2838 * 3. Return: 2839 * 1 and set scsi3addr to address of matching physical 2840 * 0 if no matching physical disk was found.
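 * The handle being matched lives in bytes 28-31 of each extended
 * REPORT PHYSICAL LUNS entry, so step 2 is a linear scan of
 * entry->ioaccel_handle across the reported physical devices.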
2841 */ 2842 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, 2843 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr) 2844 { 2845 struct ReportExtendedLUNdata *physicals = NULL; 2846 int responsesize = 24; /* size of physical extended response */ 2847 int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize; 2848 u32 nphysicals = 0; /* number of reported physical devs */ 2849 int found = 0; /* found match (1) or not (0) */ 2850 u32 find; /* handle we need to match */ 2851 int i; 2852 struct scsi_cmnd *scmd; /* scsi command within request being aborted */ 2853 struct hpsa_scsi_dev_t *d; /* device of request being aborted */ 2854 struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */ 2855 __le32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */ 2856 __le32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */ 2857 2858 if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2) 2859 return 0; /* no match */ 2860 2861 /* point to the ioaccel2 device handle */ 2862 c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex]; 2863 if (c2a == NULL) 2864 return 0; /* no match */ 2865 2866 scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd; 2867 if (scmd == NULL) 2868 return 0; /* no match */ 2869 2870 d = scmd->device->hostdata; 2871 if (d == NULL) 2872 return 0; /* no match */ 2873 2874 it_nexus = cpu_to_le32(d->ioaccel_handle); 2875 scsi_nexus = c2a->scsi_nexus; 2876 find = le32_to_cpu(c2a->scsi_nexus); 2877 2878 if (h->raid_offload_debug > 0) 2879 dev_info(&h->pdev->dev, 2880 "%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n", 2881 __func__, scsi_nexus, 2882 d->device_id[0], d->device_id[1], d->device_id[2], 2883 d->device_id[3], d->device_id[4], d->device_id[5], 2884 d->device_id[6], d->device_id[7], d->device_id[8], 2885 d->device_id[9], d->device_id[10], d->device_id[11], 2886 d->device_id[12], d->device_id[13], d->device_id[14], 2887 d->device_id[15]); 2888 2889 /* Get the list of physical devices */ 2890 physicals = kzalloc(reportsize, GFP_KERNEL); 2891 if (physicals == NULL) 2892 return 0; 2893 if (hpsa_scsi_do_report_phys_luns(h, physicals, reportsize)) { 2894 dev_err(&h->pdev->dev, 2895 "Can't lookup %s device handle: report physical LUNs failed.\n", 2896 "HP SSD Smart Path"); 2897 kfree(physicals); 2898 return 0; 2899 } 2900 nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) / 2901 responsesize; 2902 2903 /* find ioaccel2 handle in list of physicals: */ 2904 for (i = 0; i < nphysicals; i++) { 2905 struct ext_report_lun_entry *entry = &physicals->LUN[i]; 2906 2907 /* handle is in bytes 28-31 of each lun */ 2908 if (entry->ioaccel_handle != find) 2909 continue; /* didn't match */ 2910 found = 1; 2911 memcpy(scsi3addr, entry->lunid, 8); 2912 if (h->raid_offload_debug > 0) 2913 dev_info(&h->pdev->dev, 2914 "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n", 2915 __func__, find, 2916 entry->ioaccel_handle, scsi3addr); 2917 break; /* found it */ 2918 } 2919 2920 kfree(physicals); 2921 if (found) 2922 return 1; 2923 else 2924 return 0; 2925 2926 } 2927 /* 2928 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev, 2929 * logdev. The number of luns in physdev and logdev are returned in 2930 * *nphysicals and *nlogicals, respectively. 2931 * Returns 0 on success, -1 otherwise. 
2932 */ 2933 static int hpsa_gather_lun_info(struct ctlr_info *h, 2934 struct ReportExtendedLUNdata *physdev, u32 *nphysicals, 2935 struct ReportLUNdata *logdev, u32 *nlogicals) 2936 { 2937 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) { 2938 dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); 2939 return -1; 2940 } 2941 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24; 2942 if (*nphysicals > HPSA_MAX_PHYS_LUN) { 2943 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n", 2944 HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN); 2945 *nphysicals = HPSA_MAX_PHYS_LUN; 2946 } 2947 if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) { 2948 dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); 2949 return -1; 2950 } 2951 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8; 2952 /* Reject Logicals in excess of our max capability. */ 2953 if (*nlogicals > HPSA_MAX_LUN) { 2954 dev_warn(&h->pdev->dev, 2955 "maximum logical LUNs (%d) exceeded. " 2956 "%d LUNs ignored.\n", HPSA_MAX_LUN, 2957 *nlogicals - HPSA_MAX_LUN); 2958 *nlogicals = HPSA_MAX_LUN; 2959 } 2960 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) { 2961 dev_warn(&h->pdev->dev, 2962 "maximum logical + physical LUNs (%d) exceeded. " 2963 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, 2964 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN); 2965 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals; 2966 } 2967 return 0; 2968 } 2969 2970 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, 2971 int i, int nphysicals, int nlogicals, 2972 struct ReportExtendedLUNdata *physdev_list, 2973 struct ReportLUNdata *logdev_list) 2974 { 2975 /* Helper function, figure out where the LUN ID info is coming from 2976 * given index i, lists of physical and logical devices, where in 2977 * the list the raid controller is supposed to appear (first or last) 2978 */ 2979 2980 int logicals_start = nphysicals + (raid_ctlr_position == 0); 2981 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0); 2982 2983 if (i == raid_ctlr_position) 2984 return RAID_CTLR_LUNID; 2985 2986 if (i < logicals_start) 2987 return &physdev_list->LUN[i - 2988 (raid_ctlr_position == 0)].lunid[0]; 2989 2990 if (i < last_device) 2991 return &logdev_list->LUN[i - nphysicals - 2992 (raid_ctlr_position == 0)][0]; 2993 BUG(); 2994 return NULL; 2995 } 2996 2997 static int hpsa_hba_mode_enabled(struct ctlr_info *h) 2998 { 2999 int rc; 3000 int hba_mode_enabled; 3001 struct bmic_controller_parameters *ctlr_params; 3002 ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters), 3003 GFP_KERNEL); 3004 3005 if (!ctlr_params) 3006 return -ENOMEM; 3007 rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params, 3008 sizeof(struct bmic_controller_parameters)); 3009 if (rc) { 3010 kfree(ctlr_params); 3011 return rc; 3012 } 3013 3014 hba_mode_enabled = 3015 ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0); 3016 kfree(ctlr_params); 3017 return hba_mode_enabled; 3018 } 3019 3020 /* get physical drive ioaccel handle and queue depth */ 3021 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h, 3022 struct hpsa_scsi_dev_t *dev, 3023 u8 *lunaddrbytes, 3024 struct bmic_identify_physical_device *id_phys) 3025 { 3026 int rc; 3027 struct ext_report_lun_entry *rle = 3028 (struct ext_report_lun_entry *) lunaddrbytes; 3029 3030 dev->ioaccel_handle = rle->ioaccel_handle; 3031 memset(id_phys, 0, sizeof(*id_phys)); 3032 rc = hpsa_bmic_id_physical_device(h, lunaddrbytes, 
3033 GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys, 3034 sizeof(*id_phys)); 3035 if (!rc) 3036 /* Reserve space for FW operations */ 3037 #define DRIVE_CMDS_RESERVED_FOR_FW 2 3038 #define DRIVE_QUEUE_DEPTH 7 3039 dev->queue_depth = 3040 le16_to_cpu(id_phys->current_queue_depth_limit) - 3041 DRIVE_CMDS_RESERVED_FOR_FW; 3042 else 3043 dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */ 3044 atomic_set(&dev->ioaccel_cmds_out, 0); 3045 } 3046 3047 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) 3048 { 3049 /* the idea here is we could get notified 3050 * that some devices have changed, so we do a report 3051 * physical luns and report logical luns cmd, and adjust 3052 * our list of devices accordingly. 3053 * 3054 * The scsi3addr's of devices won't change so long as the 3055 * adapter is not reset. That means we can rescan and 3056 * tell which devices we already know about, vs. new 3057 * devices, vs. disappearing devices. 3058 */ 3059 struct ReportExtendedLUNdata *physdev_list = NULL; 3060 struct ReportLUNdata *logdev_list = NULL; 3061 struct bmic_identify_physical_device *id_phys = NULL; 3062 u32 nphysicals = 0; 3063 u32 nlogicals = 0; 3064 u32 ndev_allocated = 0; 3065 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; 3066 int ncurrent = 0; 3067 int i, n_ext_target_devs, ndevs_to_allocate; 3068 int raid_ctlr_position; 3069 int rescan_hba_mode; 3070 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS); 3071 3072 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); 3073 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL); 3074 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL); 3075 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); 3076 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL); 3077 3078 if (!currentsd || !physdev_list || !logdev_list || 3079 !tmpdevice || !id_phys) { 3080 dev_err(&h->pdev->dev, "out of memory\n"); 3081 goto out; 3082 } 3083 memset(lunzerobits, 0, sizeof(lunzerobits)); 3084 3085 rescan_hba_mode = hpsa_hba_mode_enabled(h); 3086 if (rescan_hba_mode < 0) 3087 goto out; 3088 3089 if (!h->hba_mode_enabled && rescan_hba_mode) 3090 dev_warn(&h->pdev->dev, "HBA mode enabled\n"); 3091 else if (h->hba_mode_enabled && !rescan_hba_mode) 3092 dev_warn(&h->pdev->dev, "HBA mode disabled\n"); 3093 3094 h->hba_mode_enabled = rescan_hba_mode; 3095 3096 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals, 3097 logdev_list, &nlogicals)) 3098 goto out; 3099 3100 /* We might see up to the maximum number of logical and physical disks 3101 * plus external target devices, and a device for the local RAID 3102 * controller. 3103 */ 3104 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1; 3105 3106 /* Allocate the per device structures */ 3107 for (i = 0; i < ndevs_to_allocate; i++) { 3108 if (i >= HPSA_MAX_DEVICES) { 3109 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded." 
3110 " %d devices ignored.\n", HPSA_MAX_DEVICES, 3111 ndevs_to_allocate - HPSA_MAX_DEVICES); 3112 break; 3113 } 3114 3115 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL); 3116 if (!currentsd[i]) { 3117 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n", 3118 __FILE__, __LINE__); 3119 goto out; 3120 } 3121 ndev_allocated++; 3122 } 3123 3124 if (is_scsi_rev_5(h)) 3125 raid_ctlr_position = 0; 3126 else 3127 raid_ctlr_position = nphysicals + nlogicals; 3128 3129 /* adjust our table of devices */ 3130 n_ext_target_devs = 0; 3131 for (i = 0; i < nphysicals + nlogicals + 1; i++) { 3132 u8 *lunaddrbytes, is_OBDR = 0; 3133 3134 /* Figure out where the LUN ID info is coming from */ 3135 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, 3136 i, nphysicals, nlogicals, physdev_list, logdev_list); 3137 /* skip masked physical devices. */ 3138 if (lunaddrbytes[3] & 0xC0 && 3139 i < nphysicals + (raid_ctlr_position == 0)) 3140 continue; 3141 3142 /* Get device type, vendor, model, device id */ 3143 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice, 3144 &is_OBDR)) 3145 continue; /* skip it if we can't talk to it. */ 3146 figure_bus_target_lun(h, lunaddrbytes, tmpdevice); 3147 this_device = currentsd[ncurrent]; 3148 3149 /* 3150 * For external target devices, we have to insert a LUN 0 which 3151 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there 3152 * is nonetheless an enclosure device there. We have to 3153 * present that otherwise linux won't find anything if 3154 * there is no lun 0. 3155 */ 3156 if (add_ext_target_dev(h, tmpdevice, this_device, 3157 lunaddrbytes, lunzerobits, 3158 &n_ext_target_devs)) { 3159 ncurrent++; 3160 this_device = currentsd[ncurrent]; 3161 } 3162 3163 *this_device = *tmpdevice; 3164 3165 switch (this_device->devtype) { 3166 case TYPE_ROM: 3167 /* We don't *really* support actual CD-ROM devices, 3168 * just "One Button Disaster Recovery" tape drive 3169 * which temporarily pretends to be a CD-ROM drive. 3170 * So we check that the device is really an OBDR tape 3171 * device by checking for "$DR-10" in bytes 43-48 of 3172 * the inquiry data. 3173 */ 3174 if (is_OBDR) 3175 ncurrent++; 3176 break; 3177 case TYPE_DISK: 3178 if (h->hba_mode_enabled) { 3179 /* never use raid mapper in HBA mode */ 3180 this_device->offload_enabled = 0; 3181 ncurrent++; 3182 break; 3183 } else if (h->acciopath_status) { 3184 if (i >= nphysicals) { 3185 ncurrent++; 3186 break; 3187 } 3188 } else { 3189 if (i < nphysicals) 3190 break; 3191 ncurrent++; 3192 break; 3193 } 3194 if (h->transMethod & CFGTBL_Trans_io_accel1 || 3195 h->transMethod & CFGTBL_Trans_io_accel2) { 3196 hpsa_get_ioaccel_drive_info(h, this_device, 3197 lunaddrbytes, id_phys); 3198 atomic_set(&this_device->ioaccel_cmds_out, 0); 3199 ncurrent++; 3200 } 3201 break; 3202 case TYPE_TAPE: 3203 case TYPE_MEDIUM_CHANGER: 3204 ncurrent++; 3205 break; 3206 case TYPE_RAID: 3207 /* Only present the Smartarray HBA as a RAID controller. 3208 * If it's a RAID controller other than the HBA itself 3209 * (an external RAID controller, MSA500 or similar) 3210 * don't present it. 
3211 */ 3212 if (!is_hba_lunid(lunaddrbytes)) 3213 break; 3214 ncurrent++; 3215 break; 3216 default: 3217 break; 3218 } 3219 if (ncurrent >= HPSA_MAX_DEVICES) 3220 break; 3221 } 3222 hpsa_update_log_drive_phys_drive_ptrs(h, currentsd, ncurrent); 3223 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); 3224 out: 3225 kfree(tmpdevice); 3226 for (i = 0; i < ndev_allocated; i++) 3227 kfree(currentsd[i]); 3228 kfree(currentsd); 3229 kfree(physdev_list); 3230 kfree(logdev_list); 3231 kfree(id_phys); 3232 } 3233 3234 /* 3235 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci 3236 * dma mapping and fills in the scatter gather entries of the 3237 * hpsa command, cp. 3238 */ 3239 static int hpsa_scatter_gather(struct ctlr_info *h, 3240 struct CommandList *cp, 3241 struct scsi_cmnd *cmd) 3242 { 3243 unsigned int len; 3244 struct scatterlist *sg; 3245 u64 addr64; 3246 int use_sg, i, sg_index, chained; 3247 struct SGDescriptor *curr_sg; 3248 3249 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); 3250 3251 use_sg = scsi_dma_map(cmd); 3252 if (use_sg < 0) 3253 return use_sg; 3254 3255 if (!use_sg) 3256 goto sglist_finished; 3257 3258 curr_sg = cp->SG; 3259 chained = 0; 3260 sg_index = 0; 3261 scsi_for_each_sg(cmd, sg, use_sg, i) { 3262 if (i == h->max_cmd_sg_entries - 1 && 3263 use_sg > h->max_cmd_sg_entries) { 3264 chained = 1; 3265 curr_sg = h->cmd_sg_list[cp->cmdindex]; 3266 sg_index = 0; 3267 } 3268 addr64 = (u64) sg_dma_address(sg); 3269 len = sg_dma_len(sg); 3270 curr_sg->Addr = cpu_to_le64(addr64); 3271 curr_sg->Len = cpu_to_le32(len); 3272 curr_sg->Ext = cpu_to_le32(0); 3273 curr_sg++; 3274 } 3275 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST); 3276 3277 if (use_sg + chained > h->maxSG) 3278 h->maxSG = use_sg + chained; 3279 3280 if (chained) { 3281 cp->Header.SGList = h->max_cmd_sg_entries; 3282 cp->Header.SGTotal = cpu_to_le16(use_sg + 1); 3283 if (hpsa_map_sg_chain_block(h, cp)) { 3284 scsi_dma_unmap(cmd); 3285 return -1; 3286 } 3287 return 0; 3288 } 3289 3290 sglist_finished: 3291 3292 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */ 3293 cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */ 3294 return 0; 3295 } 3296 3297 #define IO_ACCEL_INELIGIBLE (1) 3298 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len) 3299 { 3300 int is_write = 0; 3301 u32 block; 3302 u32 block_cnt; 3303 3304 /* Perform some CDB fixups if needed using 10 byte reads/writes only */ 3305 switch (cdb[0]) { 3306 case WRITE_6: 3307 case WRITE_12: 3308 is_write = 1; 3309 case READ_6: 3310 case READ_12: 3311 if (*cdb_len == 6) { 3312 block = (((u32) cdb[2]) << 8) | cdb[3]; 3313 block_cnt = cdb[4]; 3314 } else { 3315 BUG_ON(*cdb_len != 12); 3316 block = (((u32) cdb[2]) << 24) | 3317 (((u32) cdb[3]) << 16) | 3318 (((u32) cdb[4]) << 8) | 3319 cdb[5]; 3320 block_cnt = 3321 (((u32) cdb[6]) << 24) | 3322 (((u32) cdb[7]) << 16) | 3323 (((u32) cdb[8]) << 8) | 3324 cdb[9]; 3325 } 3326 if (block_cnt > 0xffff) 3327 return IO_ACCEL_INELIGIBLE; 3328 3329 cdb[0] = is_write ? 
WRITE_10 : READ_10; 3330 cdb[1] = 0; 3331 cdb[2] = (u8) (block >> 24); 3332 cdb[3] = (u8) (block >> 16); 3333 cdb[4] = (u8) (block >> 8); 3334 cdb[5] = (u8) (block); 3335 cdb[6] = 0; 3336 cdb[7] = (u8) (block_cnt >> 8); 3337 cdb[8] = (u8) (block_cnt); 3338 cdb[9] = 0; 3339 *cdb_len = 10; 3340 break; 3341 } 3342 return 0; 3343 } 3344 3345 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, 3346 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 3347 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) 3348 { 3349 struct scsi_cmnd *cmd = c->scsi_cmd; 3350 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; 3351 unsigned int len; 3352 unsigned int total_len = 0; 3353 struct scatterlist *sg; 3354 u64 addr64; 3355 int use_sg, i; 3356 struct SGDescriptor *curr_sg; 3357 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE; 3358 3359 /* TODO: implement chaining support */ 3360 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) { 3361 atomic_dec(&phys_disk->ioaccel_cmds_out); 3362 return IO_ACCEL_INELIGIBLE; 3363 } 3364 3365 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX); 3366 3367 if (fixup_ioaccel_cdb(cdb, &cdb_len)) { 3368 atomic_dec(&phys_disk->ioaccel_cmds_out); 3369 return IO_ACCEL_INELIGIBLE; 3370 } 3371 3372 c->cmd_type = CMD_IOACCEL1; 3373 3374 /* Adjust the DMA address to point to the accelerated command buffer */ 3375 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle + 3376 (c->cmdindex * sizeof(*cp)); 3377 BUG_ON(c->busaddr & 0x0000007F); 3378 3379 use_sg = scsi_dma_map(cmd); 3380 if (use_sg < 0) { 3381 atomic_dec(&phys_disk->ioaccel_cmds_out); 3382 return use_sg; 3383 } 3384 3385 if (use_sg) { 3386 curr_sg = cp->SG; 3387 scsi_for_each_sg(cmd, sg, use_sg, i) { 3388 addr64 = (u64) sg_dma_address(sg); 3389 len = sg_dma_len(sg); 3390 total_len += len; 3391 curr_sg->Addr = cpu_to_le64(addr64); 3392 curr_sg->Len = cpu_to_le32(len); 3393 curr_sg->Ext = cpu_to_le32(0); 3394 curr_sg++; 3395 } 3396 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST); 3397 3398 switch (cmd->sc_data_direction) { 3399 case DMA_TO_DEVICE: 3400 control |= IOACCEL1_CONTROL_DATA_OUT; 3401 break; 3402 case DMA_FROM_DEVICE: 3403 control |= IOACCEL1_CONTROL_DATA_IN; 3404 break; 3405 case DMA_NONE: 3406 control |= IOACCEL1_CONTROL_NODATAXFER; 3407 break; 3408 default: 3409 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 3410 cmd->sc_data_direction); 3411 BUG(); 3412 break; 3413 } 3414 } else { 3415 control |= IOACCEL1_CONTROL_NODATAXFER; 3416 } 3417 3418 c->Header.SGList = use_sg; 3419 /* Fill out the command structure to submit */ 3420 cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF); 3421 cp->transfer_len = cpu_to_le32(total_len); 3422 cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ | 3423 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK)); 3424 cp->control = cpu_to_le32(control); 3425 memcpy(cp->CDB, cdb, cdb_len); 3426 memcpy(cp->CISS_LUN, scsi3addr, 8); 3427 /* Tag was already set at init time. */ 3428 enqueue_cmd_and_start_io(h, c); 3429 return 0; 3430 } 3431 3432 /* 3433 * Queue a command directly to a device behind the controller using the 3434 * I/O accelerator path. 
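 * The target is already a physical device, so no RAID map math is
 * needed; the ioaccel handle cached in dev->ioaccel_handle at scan
 * time addresses the drive directly.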
3435 */ 3436 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h, 3437 struct CommandList *c) 3438 { 3439 struct scsi_cmnd *cmd = c->scsi_cmd; 3440 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 3441 3442 c->phys_disk = dev; 3443 3444 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, 3445 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev); 3446 } 3447 3448 /* 3449 * Set encryption parameters for the ioaccel2 request 3450 */ 3451 static void set_encrypt_ioaccel2(struct ctlr_info *h, 3452 struct CommandList *c, struct io_accel2_cmd *cp) 3453 { 3454 struct scsi_cmnd *cmd = c->scsi_cmd; 3455 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 3456 struct raid_map_data *map = &dev->raid_map; 3457 u64 first_block; 3458 3459 BUG_ON(!(dev->offload_config && dev->offload_enabled)); 3460 3461 /* Are we doing encryption on this device */ 3462 if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON)) 3463 return; 3464 /* Set the data encryption key index. */ 3465 cp->dekindex = map->dekindex; 3466 3467 /* Set the encryption enable flag, encoded into direction field. */ 3468 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK; 3469 3470 /* Set encryption tweak values based on logical block address 3471 * If block size is 512, tweak value is LBA. 3472 * For other block sizes, tweak is (LBA * block size)/ 512) 3473 */ 3474 switch (cmd->cmnd[0]) { 3475 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */ 3476 case WRITE_6: 3477 case READ_6: 3478 first_block = get_unaligned_be16(&cmd->cmnd[2]); 3479 break; 3480 case WRITE_10: 3481 case READ_10: 3482 /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */ 3483 case WRITE_12: 3484 case READ_12: 3485 first_block = get_unaligned_be32(&cmd->cmnd[2]); 3486 break; 3487 case WRITE_16: 3488 case READ_16: 3489 first_block = get_unaligned_be64(&cmd->cmnd[2]); 3490 break; 3491 default: 3492 dev_err(&h->pdev->dev, 3493 "ERROR: %s: size (0x%x) not supported for encryption\n", 3494 __func__, cmd->cmnd[0]); 3495 BUG(); 3496 break; 3497 } 3498 3499 if (le32_to_cpu(map->volume_blk_size) != 512) 3500 first_block = first_block * 3501 le32_to_cpu(map->volume_blk_size)/512; 3502 3503 cp->tweak_lower = cpu_to_le32(first_block); 3504 cp->tweak_upper = cpu_to_le32(first_block >> 32); 3505 } 3506 3507 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, 3508 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 3509 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) 3510 { 3511 struct scsi_cmnd *cmd = c->scsi_cmd; 3512 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; 3513 struct ioaccel2_sg_element *curr_sg; 3514 int use_sg, i; 3515 struct scatterlist *sg; 3516 u64 addr64; 3517 u32 len; 3518 u32 total_len = 0; 3519 3520 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) { 3521 atomic_dec(&phys_disk->ioaccel_cmds_out); 3522 return IO_ACCEL_INELIGIBLE; 3523 } 3524 3525 if (fixup_ioaccel_cdb(cdb, &cdb_len)) { 3526 atomic_dec(&phys_disk->ioaccel_cmds_out); 3527 return IO_ACCEL_INELIGIBLE; 3528 } 3529 3530 c->cmd_type = CMD_IOACCEL2; 3531 /* Adjust the DMA address to point to the accelerated command buffer */ 3532 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + 3533 (c->cmdindex * sizeof(*cp)); 3534 BUG_ON(c->busaddr & 0x0000007F); 3535 3536 memset(cp, 0, sizeof(*cp)); 3537 cp->IU_type = IOACCEL2_IU_TYPE; 3538 3539 use_sg = scsi_dma_map(cmd); 3540 if (use_sg < 0) { 3541 atomic_dec(&phys_disk->ioaccel_cmds_out); 3542 return use_sg; 3543 } 3544 3545 if (use_sg) { 3546 BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES); 3547 curr_sg = 
cp->sg; 3548 scsi_for_each_sg(cmd, sg, use_sg, i) { 3549 addr64 = (u64) sg_dma_address(sg); 3550 len = sg_dma_len(sg); 3551 total_len += len; 3552 curr_sg->address = cpu_to_le64(addr64); 3553 curr_sg->length = cpu_to_le32(len); 3554 curr_sg->reserved[0] = 0; 3555 curr_sg->reserved[1] = 0; 3556 curr_sg->reserved[2] = 0; 3557 curr_sg->chain_indicator = 0; 3558 curr_sg++; 3559 } 3560 3561 switch (cmd->sc_data_direction) { 3562 case DMA_TO_DEVICE: 3563 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 3564 cp->direction |= IOACCEL2_DIR_DATA_OUT; 3565 break; 3566 case DMA_FROM_DEVICE: 3567 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 3568 cp->direction |= IOACCEL2_DIR_DATA_IN; 3569 break; 3570 case DMA_NONE: 3571 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 3572 cp->direction |= IOACCEL2_DIR_NO_DATA; 3573 break; 3574 default: 3575 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 3576 cmd->sc_data_direction); 3577 BUG(); 3578 break; 3579 } 3580 } else { 3581 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 3582 cp->direction |= IOACCEL2_DIR_NO_DATA; 3583 } 3584 3585 /* Set encryption parameters, if necessary */ 3586 set_encrypt_ioaccel2(h, c, cp); 3587 3588 cp->scsi_nexus = cpu_to_le32(ioaccel_handle); 3589 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT); 3590 memcpy(cp->cdb, cdb, sizeof(cp->cdb)); 3591 3592 /* fill in sg elements */ 3593 cp->sg_count = (u8) use_sg; 3594 3595 cp->data_len = cpu_to_le32(total_len); 3596 cp->err_ptr = cpu_to_le64(c->busaddr + 3597 offsetof(struct io_accel2_cmd, error_data)); 3598 cp->err_len = cpu_to_le32(sizeof(cp->error_data)); 3599 3600 enqueue_cmd_and_start_io(h, c); 3601 return 0; 3602 } 3603 3604 /* 3605 * Queue a command to the correct I/O accelerator path. 3606 */ 3607 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, 3608 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 3609 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) 3610 { 3611 /* Try to honor the device's queue depth */ 3612 if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) > 3613 phys_disk->queue_depth) { 3614 atomic_dec(&phys_disk->ioaccel_cmds_out); 3615 return IO_ACCEL_INELIGIBLE; 3616 } 3617 if (h->transMethod & CFGTBL_Trans_io_accel1) 3618 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle, 3619 cdb, cdb_len, scsi3addr, 3620 phys_disk); 3621 else 3622 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle, 3623 cdb, cdb_len, scsi3addr, 3624 phys_disk); 3625 } 3626 3627 static void raid_map_helper(struct raid_map_data *map, 3628 int offload_to_mirror, u32 *map_index, u32 *current_group) 3629 { 3630 if (offload_to_mirror == 0) { 3631 /* use physical disk in the first mirrored group. */ 3632 *map_index %= le16_to_cpu(map->data_disks_per_row); 3633 return; 3634 } 3635 do { 3636 /* determine mirror group that *map_index indicates */ 3637 *current_group = *map_index / 3638 le16_to_cpu(map->data_disks_per_row); 3639 if (offload_to_mirror == *current_group) 3640 continue; 3641 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) { 3642 /* select map index from next group */ 3643 *map_index += le16_to_cpu(map->data_disks_per_row); 3644 (*current_group)++; 3645 } else { 3646 /* select map index from first group */ 3647 *map_index %= le16_to_cpu(map->data_disks_per_row); 3648 *current_group = 0; 3649 } 3650 } while (offload_to_mirror != *current_group); 3651 } 3652 3653 /* 3654 * Attempt to perform offload RAID mapping for a logical volume I/O. 
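 * Decode the LBA and block count from the CDB, verify that the request
 * touches a single (row, column) of the RAID map, then rebuild the CDB
 * against the backing physical disk. For example, with strip_size 128
 * and 3 data disks per row (blocks_per_row = 384), a READ_10 at LBA 1000
 * for 8 blocks lands entirely in row 2, column 1, so it is eligible;
 * anything spanning rows or columns returns IO_ACCEL_INELIGIBLE and is
 * sent down the normal RAID path instead.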
3655 */ 3656 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, 3657 struct CommandList *c) 3658 { 3659 struct scsi_cmnd *cmd = c->scsi_cmd; 3660 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 3661 struct raid_map_data *map = &dev->raid_map; 3662 struct raid_map_disk_data *dd = &map->data[0]; 3663 int is_write = 0; 3664 u32 map_index; 3665 u64 first_block, last_block; 3666 u32 block_cnt; 3667 u32 blocks_per_row; 3668 u64 first_row, last_row; 3669 u32 first_row_offset, last_row_offset; 3670 u32 first_column, last_column; 3671 u64 r0_first_row, r0_last_row; 3672 u32 r5or6_blocks_per_row; 3673 u64 r5or6_first_row, r5or6_last_row; 3674 u32 r5or6_first_row_offset, r5or6_last_row_offset; 3675 u32 r5or6_first_column, r5or6_last_column; 3676 u32 total_disks_per_row; 3677 u32 stripesize; 3678 u32 first_group, last_group, current_group; 3679 u32 map_row; 3680 u32 disk_handle; 3681 u64 disk_block; 3682 u32 disk_block_cnt; 3683 u8 cdb[16]; 3684 u8 cdb_len; 3685 u16 strip_size; 3686 #if BITS_PER_LONG == 32 3687 u64 tmpdiv; 3688 #endif 3689 int offload_to_mirror; 3690 3691 BUG_ON(!(dev->offload_config && dev->offload_enabled)); 3692 3693 /* check for valid opcode, get LBA and block count */ 3694 switch (cmd->cmnd[0]) { 3695 case WRITE_6: 3696 is_write = 1; 3697 case READ_6: 3698 first_block = 3699 (((u64) cmd->cmnd[2]) << 8) | 3700 cmd->cmnd[3]; 3701 block_cnt = cmd->cmnd[4]; 3702 if (block_cnt == 0) 3703 block_cnt = 256; 3704 break; 3705 case WRITE_10: 3706 is_write = 1; 3707 case READ_10: 3708 first_block = 3709 (((u64) cmd->cmnd[2]) << 24) | 3710 (((u64) cmd->cmnd[3]) << 16) | 3711 (((u64) cmd->cmnd[4]) << 8) | 3712 cmd->cmnd[5]; 3713 block_cnt = 3714 (((u32) cmd->cmnd[7]) << 8) | 3715 cmd->cmnd[8]; 3716 break; 3717 case WRITE_12: 3718 is_write = 1; 3719 case READ_12: 3720 first_block = 3721 (((u64) cmd->cmnd[2]) << 24) | 3722 (((u64) cmd->cmnd[3]) << 16) | 3723 (((u64) cmd->cmnd[4]) << 8) | 3724 cmd->cmnd[5]; 3725 block_cnt = 3726 (((u32) cmd->cmnd[6]) << 24) | 3727 (((u32) cmd->cmnd[7]) << 16) | 3728 (((u32) cmd->cmnd[8]) << 8) | 3729 cmd->cmnd[9]; 3730 break; 3731 case WRITE_16: 3732 is_write = 1; 3733 case READ_16: 3734 first_block = 3735 (((u64) cmd->cmnd[2]) << 56) | 3736 (((u64) cmd->cmnd[3]) << 48) | 3737 (((u64) cmd->cmnd[4]) << 40) | 3738 (((u64) cmd->cmnd[5]) << 32) | 3739 (((u64) cmd->cmnd[6]) << 24) | 3740 (((u64) cmd->cmnd[7]) << 16) | 3741 (((u64) cmd->cmnd[8]) << 8) | 3742 cmd->cmnd[9]; 3743 block_cnt = 3744 (((u32) cmd->cmnd[10]) << 24) | 3745 (((u32) cmd->cmnd[11]) << 16) | 3746 (((u32) cmd->cmnd[12]) << 8) | 3747 cmd->cmnd[13]; 3748 break; 3749 default: 3750 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */ 3751 } 3752 last_block = first_block + block_cnt - 1; 3753 3754 /* check for write to non-RAID-0 */ 3755 if (is_write && dev->raid_level != 0) 3756 return IO_ACCEL_INELIGIBLE; 3757 3758 /* check for invalid block or wraparound */ 3759 if (last_block >= le64_to_cpu(map->volume_blk_cnt) || 3760 last_block < first_block) 3761 return IO_ACCEL_INELIGIBLE; 3762 3763 /* calculate stripe information for the request */ 3764 blocks_per_row = le16_to_cpu(map->data_disks_per_row) * 3765 le16_to_cpu(map->strip_size); 3766 strip_size = le16_to_cpu(map->strip_size); 3767 #if BITS_PER_LONG == 32 3768 tmpdiv = first_block; 3769 (void) do_div(tmpdiv, blocks_per_row); 3770 first_row = tmpdiv; 3771 tmpdiv = last_block; 3772 (void) do_div(tmpdiv, blocks_per_row); 3773 last_row = tmpdiv; 3774 first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); 3775 
last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); 3776 tmpdiv = first_row_offset; 3777 (void) do_div(tmpdiv, strip_size); 3778 first_column = tmpdiv; 3779 tmpdiv = last_row_offset; 3780 (void) do_div(tmpdiv, strip_size); 3781 last_column = tmpdiv; 3782 #else 3783 first_row = first_block / blocks_per_row; 3784 last_row = last_block / blocks_per_row; 3785 first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); 3786 last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); 3787 first_column = first_row_offset / strip_size; 3788 last_column = last_row_offset / strip_size; 3789 #endif 3790 3791 /* if this isn't a single row/column then give to the controller */ 3792 if ((first_row != last_row) || (first_column != last_column)) 3793 return IO_ACCEL_INELIGIBLE; 3794 3795 /* proceeding with driver mapping */ 3796 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) + 3797 le16_to_cpu(map->metadata_disks_per_row); 3798 map_row = ((u32)(first_row >> map->parity_rotation_shift)) % 3799 le16_to_cpu(map->row_cnt); 3800 map_index = (map_row * total_disks_per_row) + first_column; 3801 3802 switch (dev->raid_level) { 3803 case HPSA_RAID_0: 3804 break; /* nothing special to do */ 3805 case HPSA_RAID_1: 3806 /* Handles load balance across RAID 1 members. 3807 * (2-drive R1 and R10 with even # of drives.) 3808 * Appropriate for SSDs, not optimal for HDDs. 3809 */ 3810 BUG_ON(le16_to_cpu(map->layout_map_count) != 2); 3811 if (dev->offload_to_mirror) 3812 map_index += le16_to_cpu(map->data_disks_per_row); 3813 dev->offload_to_mirror = !dev->offload_to_mirror; 3814 break; 3815 case HPSA_RAID_ADM: 3816 /* Handles N-way mirrors (R1-ADM) 3817 * and R10 with # of drives divisible by 3. 3818 */ 3819 BUG_ON(le16_to_cpu(map->layout_map_count) != 3); 3820 3821 offload_to_mirror = dev->offload_to_mirror; 3822 raid_map_helper(map, offload_to_mirror, 3823 &map_index, &current_group); 3824 /* set mirror group to use next time */ 3825 offload_to_mirror = 3826 (offload_to_mirror >= 3827 le16_to_cpu(map->layout_map_count) - 1) 3828 ? 0 : offload_to_mirror + 1; 3829 dev->offload_to_mirror = offload_to_mirror; 3830 /* Avoid direct use of dev->offload_to_mirror within this 3831 * function since multiple threads might simultaneously 3832 * increment it beyond the range of dev->layout_map_count - 1.
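 * Working from a local copy keeps the cycling well-defined: with
 * layout_map_count == 3, the mirror group used advances 0 -> 1 -> 2 -> 0
 * across successive requests.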
3833 */ 3834 break; 3835 case HPSA_RAID_5: 3836 case HPSA_RAID_6: 3837 if (le16_to_cpu(map->layout_map_count) <= 1) 3838 break; 3839 3840 /* Verify first and last block are in same RAID group */ 3841 r5or6_blocks_per_row = 3842 le16_to_cpu(map->strip_size) * 3843 le16_to_cpu(map->data_disks_per_row); 3844 BUG_ON(r5or6_blocks_per_row == 0); 3845 stripesize = r5or6_blocks_per_row * 3846 le16_to_cpu(map->layout_map_count); 3847 #if BITS_PER_LONG == 32 3848 tmpdiv = first_block; 3849 first_group = do_div(tmpdiv, stripesize); 3850 tmpdiv = first_group; 3851 (void) do_div(tmpdiv, r5or6_blocks_per_row); 3852 first_group = tmpdiv; 3853 tmpdiv = last_block; 3854 last_group = do_div(tmpdiv, stripesize); 3855 tmpdiv = last_group; 3856 (void) do_div(tmpdiv, r5or6_blocks_per_row); 3857 last_group = tmpdiv; 3858 #else 3859 first_group = (first_block % stripesize) / r5or6_blocks_per_row; 3860 last_group = (last_block % stripesize) / r5or6_blocks_per_row; 3861 #endif 3862 if (first_group != last_group) 3863 return IO_ACCEL_INELIGIBLE; 3864 3865 /* Verify request is in a single row of RAID 5/6 */ 3866 #if BITS_PER_LONG == 32 3867 tmpdiv = first_block; 3868 (void) do_div(tmpdiv, stripesize); 3869 first_row = r5or6_first_row = r0_first_row = tmpdiv; 3870 tmpdiv = last_block; 3871 (void) do_div(tmpdiv, stripesize); 3872 r5or6_last_row = r0_last_row = tmpdiv; 3873 #else 3874 first_row = r5or6_first_row = r0_first_row = 3875 first_block / stripesize; 3876 r5or6_last_row = r0_last_row = last_block / stripesize; 3877 #endif 3878 if (r5or6_first_row != r5or6_last_row) 3879 return IO_ACCEL_INELIGIBLE; 3880 3881 3882 /* Verify request is in a single column */ 3883 #if BITS_PER_LONG == 32 3884 tmpdiv = first_block; 3885 first_row_offset = do_div(tmpdiv, stripesize); 3886 tmpdiv = first_row_offset; 3887 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row); 3888 r5or6_first_row_offset = first_row_offset; 3889 tmpdiv = last_block; 3890 r5or6_last_row_offset = do_div(tmpdiv, stripesize); 3891 tmpdiv = r5or6_last_row_offset; 3892 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); 3893 tmpdiv = r5or6_first_row_offset; 3894 (void) do_div(tmpdiv, map->strip_size); 3895 first_column = r5or6_first_column = tmpdiv; 3896 tmpdiv = r5or6_last_row_offset; 3897 (void) do_div(tmpdiv, map->strip_size); 3898 r5or6_last_column = tmpdiv; 3899 #else 3900 first_row_offset = r5or6_first_row_offset = 3901 (u32)((first_block % stripesize) % 3902 r5or6_blocks_per_row); 3903 3904 r5or6_last_row_offset = 3905 (u32)((last_block % stripesize) % 3906 r5or6_blocks_per_row); 3907 3908 first_column = r5or6_first_column = 3909 r5or6_first_row_offset / le16_to_cpu(map->strip_size); 3910 r5or6_last_column = 3911 r5or6_last_row_offset / le16_to_cpu(map->strip_size); 3912 #endif 3913 if (r5or6_first_column != r5or6_last_column) 3914 return IO_ACCEL_INELIGIBLE; 3915 3916 /* Request is eligible */ 3917 map_row = ((u32)(first_row >> map->parity_rotation_shift)) % 3918 le16_to_cpu(map->row_cnt); 3919 3920 map_index = (first_group * 3921 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) + 3922 (map_row * total_disks_per_row) + first_column; 3923 break; 3924 default: 3925 return IO_ACCEL_INELIGIBLE; 3926 } 3927 3928 c->phys_disk = dev->phys_disk[map_index]; 3929 3930 disk_handle = dd[map_index].ioaccel_handle; 3931 disk_block = le64_to_cpu(map->disk_starting_blk) + 3932 first_row * le16_to_cpu(map->strip_size) + 3933 (first_row_offset - first_column * 3934 le16_to_cpu(map->strip_size)); 3935 disk_block_cnt = block_cnt; 3936 3937 /* handle 
differing logical/physical block sizes */ 3938 if (map->phys_blk_shift) { 3939 disk_block <<= map->phys_blk_shift; 3940 disk_block_cnt <<= map->phys_blk_shift; 3941 } 3942 BUG_ON(disk_block_cnt > 0xffff); 3943 3944 /* build the new CDB for the physical disk I/O */ 3945 if (disk_block > 0xffffffff) { 3946 cdb[0] = is_write ? WRITE_16 : READ_16; 3947 cdb[1] = 0; 3948 cdb[2] = (u8) (disk_block >> 56); 3949 cdb[3] = (u8) (disk_block >> 48); 3950 cdb[4] = (u8) (disk_block >> 40); 3951 cdb[5] = (u8) (disk_block >> 32); 3952 cdb[6] = (u8) (disk_block >> 24); 3953 cdb[7] = (u8) (disk_block >> 16); 3954 cdb[8] = (u8) (disk_block >> 8); 3955 cdb[9] = (u8) (disk_block); 3956 cdb[10] = (u8) (disk_block_cnt >> 24); 3957 cdb[11] = (u8) (disk_block_cnt >> 16); 3958 cdb[12] = (u8) (disk_block_cnt >> 8); 3959 cdb[13] = (u8) (disk_block_cnt); 3960 cdb[14] = 0; 3961 cdb[15] = 0; 3962 cdb_len = 16; 3963 } else { 3964 cdb[0] = is_write ? WRITE_10 : READ_10; 3965 cdb[1] = 0; 3966 cdb[2] = (u8) (disk_block >> 24); 3967 cdb[3] = (u8) (disk_block >> 16); 3968 cdb[4] = (u8) (disk_block >> 8); 3969 cdb[5] = (u8) (disk_block); 3970 cdb[6] = 0; 3971 cdb[7] = (u8) (disk_block_cnt >> 8); 3972 cdb[8] = (u8) (disk_block_cnt); 3973 cdb[9] = 0; 3974 cdb_len = 10; 3975 } 3976 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len, 3977 dev->scsi3addr, 3978 dev->phys_disk[map_index]); 3979 } 3980 3981 /* Submit commands down the "normal" RAID stack path */ 3982 static int hpsa_ciss_submit(struct ctlr_info *h, 3983 struct CommandList *c, struct scsi_cmnd *cmd, 3984 unsigned char scsi3addr[]) 3985 { 3986 cmd->host_scribble = (unsigned char *) c; 3987 c->cmd_type = CMD_SCSI; 3988 c->scsi_cmd = cmd; 3989 c->Header.ReplyQueue = 0; /* unused in simple mode */ 3990 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); 3991 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT)); 3992 3993 /* Fill in the request block... */ 3994 3995 c->Request.Timeout = 0; 3996 memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); 3997 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); 3998 c->Request.CDBLen = cmd->cmd_len; 3999 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); 4000 switch (cmd->sc_data_direction) { 4001 case DMA_TO_DEVICE: 4002 c->Request.type_attr_dir = 4003 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE); 4004 break; 4005 case DMA_FROM_DEVICE: 4006 c->Request.type_attr_dir = 4007 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ); 4008 break; 4009 case DMA_NONE: 4010 c->Request.type_attr_dir = 4011 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE); 4012 break; 4013 case DMA_BIDIRECTIONAL: 4014 /* This can happen if a buggy application does a scsi passthru 4015 * and sets both inlen and outlen to non-zero. ( see 4016 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) 4017 */ 4018 4019 c->Request.type_attr_dir = 4020 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD); 4021 /* This is technically wrong, and hpsa controllers should 4022 * reject it with CMD_INVALID, which is the most correct 4023 * response, but non-fibre backends appear to let it 4024 * slide by, and give the same results as if this field 4025 * were set correctly. Either way is acceptable for 4026 * our purposes here. 
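 * (XFER_RSVD is used as a stand-in because this interface has no way
 * to express a bidirectional transfer directly.)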
4027 */ 4028 4029 break; 4030 4031 default: 4032 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 4033 cmd->sc_data_direction); 4034 BUG(); 4035 break; 4036 } 4037 4038 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ 4039 cmd_free(h, c); 4040 return SCSI_MLQUEUE_HOST_BUSY; 4041 } 4042 enqueue_cmd_and_start_io(h, c); 4043 /* the cmd'll come back via intr handler in complete_scsi_command() */ 4044 return 0; 4045 } 4046 4047 static void hpsa_command_resubmit_worker(struct work_struct *work) 4048 { 4049 struct scsi_cmnd *cmd; 4050 struct hpsa_scsi_dev_t *dev; 4051 struct CommandList *c = 4052 container_of(work, struct CommandList, work); 4053 4054 cmd = c->scsi_cmd; 4055 dev = cmd->device->hostdata; 4056 if (!dev) { 4057 cmd->result = DID_NO_CONNECT << 16; 4058 cmd->scsi_done(cmd); 4059 return; 4060 } 4061 if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) { 4062 /* 4063 * If we get here, it means dma mapping failed. Try 4064 * again via scsi mid layer, which will then get 4065 * SCSI_MLQUEUE_HOST_BUSY. 4066 */ 4067 cmd->result = DID_IMM_RETRY << 16; 4068 cmd->scsi_done(cmd); 4069 } 4070 } 4071 4072 /* Running in struct Scsi_Host->host_lock less mode */ 4073 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) 4074 { 4075 struct ctlr_info *h; 4076 struct hpsa_scsi_dev_t *dev; 4077 unsigned char scsi3addr[8]; 4078 struct CommandList *c; 4079 int rc = 0; 4080 4081 /* Get the ptr to our adapter structure out of cmd->host. */ 4082 h = sdev_to_hba(cmd->device); 4083 dev = cmd->device->hostdata; 4084 if (!dev) { 4085 cmd->result = DID_NO_CONNECT << 16; 4086 cmd->scsi_done(cmd); 4087 return 0; 4088 } 4089 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); 4090 4091 if (unlikely(lockup_detected(h))) { 4092 cmd->result = DID_ERROR << 16; 4093 cmd->scsi_done(cmd); 4094 return 0; 4095 } 4096 c = cmd_alloc(h); 4097 if (c == NULL) { /* trouble... */ 4098 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); 4099 return SCSI_MLQUEUE_HOST_BUSY; 4100 } 4101 4102 /* Call alternate submit routine for I/O accelerated commands. 4103 * Retries always go down the normal I/O path. 4104 */ 4105 if (likely(cmd->retries == 0 && 4106 cmd->request->cmd_type == REQ_TYPE_FS && 4107 h->acciopath_status)) { 4108 4109 cmd->host_scribble = (unsigned char *) c; 4110 c->cmd_type = CMD_SCSI; 4111 c->scsi_cmd = cmd; 4112 4113 if (dev->offload_enabled) { 4114 rc = hpsa_scsi_ioaccel_raid_map(h, c); 4115 if (rc == 0) 4116 return 0; /* Sent on ioaccel path */ 4117 if (rc < 0) { /* scsi_dma_map failed. */ 4118 cmd_free(h, c); 4119 return SCSI_MLQUEUE_HOST_BUSY; 4120 } 4121 } else if (dev->ioaccel_handle) { 4122 rc = hpsa_scsi_ioaccel_direct_map(h, c); 4123 if (rc == 0) 4124 return 0; /* Sent on direct map path */ 4125 if (rc < 0) { /* scsi_dma_map failed. */ 4126 cmd_free(h, c); 4127 return SCSI_MLQUEUE_HOST_BUSY; 4128 } 4129 } 4130 } 4131 return hpsa_ciss_submit(h, c, cmd, scsi3addr); 4132 } 4133 4134 static int do_not_scan_if_controller_locked_up(struct ctlr_info *h) 4135 { 4136 unsigned long flags; 4137 4138 /* 4139 * Don't let rescans be initiated on a controller known 4140 * to be locked up. If the controller locks up *during* 4141 * a rescan, that thread is probably hosed, but at least 4142 * we can prevent new rescan threads from piling up on a 4143 * locked up controller. 
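 * Marking the scan finished and waking any waiters below also keeps
 * threads blocked in hpsa_scan_start() from sleeping forever.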
4144 */ 4145 if (unlikely(lockup_detected(h))) { 4146 spin_lock_irqsave(&h->scan_lock, flags); 4147 h->scan_finished = 1; 4148 wake_up_all(&h->scan_wait_queue); 4149 spin_unlock_irqrestore(&h->scan_lock, flags); 4150 return 1; 4151 } 4152 return 0; 4153 } 4154 4155 static void hpsa_scan_start(struct Scsi_Host *sh) 4156 { 4157 struct ctlr_info *h = shost_to_hba(sh); 4158 unsigned long flags; 4159 4160 if (do_not_scan_if_controller_locked_up(h)) 4161 return; 4162 4163 /* wait until any scan already in progress is finished. */ 4164 while (1) { 4165 spin_lock_irqsave(&h->scan_lock, flags); 4166 if (h->scan_finished) 4167 break; 4168 spin_unlock_irqrestore(&h->scan_lock, flags); 4169 wait_event(h->scan_wait_queue, h->scan_finished); 4170 /* Note: We don't need to worry about a race between this 4171 * thread and driver unload because the midlayer will 4172 * have incremented the reference count, so unload won't 4173 * happen if we're in here. 4174 */ 4175 } 4176 h->scan_finished = 0; /* mark scan as in progress */ 4177 spin_unlock_irqrestore(&h->scan_lock, flags); 4178 4179 if (do_not_scan_if_controller_locked_up(h)) 4180 return; 4181 4182 hpsa_update_scsi_devices(h, h->scsi_host->host_no); 4183 4184 spin_lock_irqsave(&h->scan_lock, flags); 4185 h->scan_finished = 1; /* mark scan as finished. */ 4186 wake_up_all(&h->scan_wait_queue); 4187 spin_unlock_irqrestore(&h->scan_lock, flags); 4188 } 4189 4190 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth) 4191 { 4192 struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata; 4193 4194 if (!logical_drive) 4195 return -ENODEV; 4196 4197 if (qdepth < 1) 4198 qdepth = 1; 4199 else if (qdepth > logical_drive->queue_depth) 4200 qdepth = logical_drive->queue_depth; 4201 4202 return scsi_change_queue_depth(sdev, qdepth); 4203 } 4204 4205 static int hpsa_scan_finished(struct Scsi_Host *sh, 4206 unsigned long elapsed_time) 4207 { 4208 struct ctlr_info *h = shost_to_hba(sh); 4209 unsigned long flags; 4210 int finished; 4211 4212 spin_lock_irqsave(&h->scan_lock, flags); 4213 finished = h->scan_finished; 4214 spin_unlock_irqrestore(&h->scan_lock, flags); 4215 return finished; 4216 } 4217 4218 static void hpsa_unregister_scsi(struct ctlr_info *h) 4219 { 4220 /* we are being forcibly unloaded, and may not refuse. 
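 * Tear-down order matters here: detach from the SCSI midlayer first,
 * then drop our host reference.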
*/ 4221 scsi_remove_host(h->scsi_host); 4222 scsi_host_put(h->scsi_host); 4223 h->scsi_host = NULL; 4224 } 4225 4226 static int hpsa_register_scsi(struct ctlr_info *h) 4227 { 4228 struct Scsi_Host *sh; 4229 int error; 4230 4231 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); 4232 if (sh == NULL) 4233 goto fail; 4234 4235 sh->io_port = 0; 4236 sh->n_io_port = 0; 4237 sh->this_id = -1; 4238 sh->max_channel = 3; 4239 sh->max_cmd_len = MAX_COMMAND_SIZE; 4240 sh->max_lun = HPSA_MAX_LUN; 4241 sh->max_id = HPSA_MAX_LUN; 4242 sh->can_queue = h->nr_cmds - 4243 HPSA_CMDS_RESERVED_FOR_ABORTS - 4244 HPSA_CMDS_RESERVED_FOR_DRIVER - 4245 HPSA_MAX_CONCURRENT_PASSTHRUS; 4246 sh->cmd_per_lun = sh->can_queue; 4247 sh->sg_tablesize = h->maxsgentries; 4248 h->scsi_host = sh; 4249 sh->hostdata[0] = (unsigned long) h; 4250 sh->irq = h->intr[h->intr_mode]; 4251 sh->unique_id = sh->irq; 4252 error = scsi_add_host(sh, &h->pdev->dev); 4253 if (error) 4254 goto fail_host_put; 4255 scsi_scan_host(sh); 4256 return 0; 4257 4258 fail_host_put: 4259 dev_err(&h->pdev->dev, "%s: scsi_add_host" 4260 " failed for controller %d\n", __func__, h->ctlr); 4261 scsi_host_put(sh); 4262 return error; 4263 fail: 4264 dev_err(&h->pdev->dev, "%s: scsi_host_alloc" 4265 " failed for controller %d\n", __func__, h->ctlr); 4266 return -ENOMEM; 4267 } 4268 4269 static int wait_for_device_to_become_ready(struct ctlr_info *h, 4270 unsigned char lunaddr[]) 4271 { 4272 int rc; 4273 int count = 0; 4274 int waittime = 1; /* seconds */ 4275 struct CommandList *c; 4276 4277 c = cmd_alloc(h); 4278 if (!c) { 4279 dev_warn(&h->pdev->dev, "out of memory in " 4280 "wait_for_device_to_become_ready.\n"); 4281 return IO_ERROR; 4282 } 4283 4284 /* Send Test Unit Ready until the device is ready, or give up. */ 4285 while (count < HPSA_TUR_RETRY_LIMIT) { 4286 4287 /* Wait for a bit. Do this first, because if we send 4288 * the TUR right away, the reset will just abort it. 4289 */ 4290 msleep(1000 * waittime); 4291 count++; 4292 rc = 0; /* Device ready. */ 4293 4294 /* Increase wait time with each try, up to a point. */ 4295 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) 4296 waittime = waittime * 2; 4297 4298 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ 4299 (void) fill_cmd(c, TEST_UNIT_READY, h, 4300 NULL, 0, 0, lunaddr, TYPE_CMD); 4301 hpsa_scsi_do_simple_cmd_core(h, c); 4302 /* no unmap needed here because no data xfer. */ 4303 4304 if (c->err_info->CommandStatus == CMD_SUCCESS) 4305 break; 4306 4307 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 4308 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && 4309 (c->err_info->SenseInfo[2] == NO_SENSE || 4310 c->err_info->SenseInfo[2] == UNIT_ATTENTION)) 4311 break; 4312 4313 dev_warn(&h->pdev->dev, "waiting %d secs " 4314 "for device to become ready.\n", waittime); 4315 rc = 1; /* device not ready. */ 4316 } 4317 4318 if (rc) 4319 dev_warn(&h->pdev->dev, "giving up on device.\n"); 4320 else 4321 dev_warn(&h->pdev->dev, "device is ready.\n"); 4322 4323 cmd_free(h, c); 4324 return rc; 4325 } 4326 4327 /* Need at least one of these error handlers to keep ../scsi/hosts.c from 4328 * complaining. Doing a host- or bus-reset can't do anything good here.
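 * A LUN reset followed by a TUR poll (wait_for_device_to_become_ready)
 * is the most useful response we can give the midlayer.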
4329 */ 4330 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) 4331 { 4332 int rc; 4333 struct ctlr_info *h; 4334 struct hpsa_scsi_dev_t *dev; 4335 4336 /* find the controller to which the command was sent */ 4337 h = sdev_to_hba(scsicmd->device); 4338 if (h == NULL) /* paranoia */ 4339 return FAILED; 4340 dev = scsicmd->device->hostdata; 4341 if (!dev) { 4342 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " 4343 "device lookup failed.\n"); 4344 return FAILED; 4345 } 4346 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n", 4347 h->scsi_host->host_no, dev->bus, dev->target, dev->lun); 4348 /* send a reset to the SCSI LUN which the command was sent to */ 4349 rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN); 4350 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) 4351 return SUCCESS; 4352 4353 dev_warn(&h->pdev->dev, "resetting device failed.\n"); 4354 return FAILED; 4355 } 4356 4357 static void swizzle_abort_tag(u8 *tag) 4358 { 4359 u8 original_tag[8]; 4360 4361 memcpy(original_tag, tag, 8); 4362 tag[0] = original_tag[3]; 4363 tag[1] = original_tag[2]; 4364 tag[2] = original_tag[1]; 4365 tag[3] = original_tag[0]; 4366 tag[4] = original_tag[7]; 4367 tag[5] = original_tag[6]; 4368 tag[6] = original_tag[5]; 4369 tag[7] = original_tag[4]; 4370 } 4371 4372 static void hpsa_get_tag(struct ctlr_info *h, 4373 struct CommandList *c, __le32 *taglower, __le32 *tagupper) 4374 { 4375 u64 tag; 4376 if (c->cmd_type == CMD_IOACCEL1) { 4377 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *) 4378 &h->ioaccel_cmd_pool[c->cmdindex]; 4379 tag = le64_to_cpu(cm1->tag); 4380 *tagupper = cpu_to_le32(tag >> 32); 4381 *taglower = cpu_to_le32(tag); 4382 return; 4383 } 4384 if (c->cmd_type == CMD_IOACCEL2) { 4385 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *) 4386 &h->ioaccel2_cmd_pool[c->cmdindex]; 4387 /* upper tag not used in ioaccel2 mode */ 4388 memset(tagupper, 0, sizeof(*tagupper)); 4389 *taglower = cm2->Tag; 4390 return; 4391 } 4392 tag = le64_to_cpu(c->Header.tag); 4393 *tagupper = cpu_to_le32(tag >> 32); 4394 *taglower = cpu_to_le32(tag); 4395 } 4396 4397 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, 4398 struct CommandList *abort, int swizzle) 4399 { 4400 int rc = IO_OK; 4401 struct CommandList *c; 4402 struct ErrorInfo *ei; 4403 __le32 tagupper, taglower; 4404 4405 c = cmd_alloc(h); 4406 if (c == NULL) { /* trouble... */ 4407 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); 4408 return -ENOMEM; 4409 } 4410 4411 /* fill_cmd can't fail here, no buffer to map */ 4412 (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort, 4413 0, 0, scsi3addr, TYPE_MSG); 4414 if (swizzle) 4415 swizzle_abort_tag(&c->Request.CDB[4]); 4416 hpsa_scsi_do_simple_cmd_core(h, c); 4417 hpsa_get_tag(h, abort, &taglower, &tagupper); 4418 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n", 4419 __func__, tagupper, taglower); 4420 /* no unmap needed here because no data xfer. */ 4421 4422 ei = c->err_info; 4423 switch (ei->CommandStatus) { 4424 case CMD_SUCCESS: 4425 break; 4426 case CMD_UNABORTABLE: /* Very common, don't make noise.
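 * Typically the command has already completed, or is past the point
 * where the firmware can still intercept it.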
*/ 4427 rc = -1; 4428 break; 4429 default: 4430 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n", 4431 __func__, tagupper, taglower); 4432 hpsa_scsi_interpret_error(h, c); 4433 rc = -1; 4434 break; 4435 } 4436 cmd_free(h, c); 4437 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", 4438 __func__, tagupper, taglower); 4439 return rc; 4440 } 4441 4442 /* ioaccel2 path firmware cannot handle abort task requests. 4443 * Change abort requests to physical target reset, and send to the 4444 * address of the physical disk used for the ioaccel 2 command. 4445 * Return 0 on success (IO_OK) 4446 * -1 on failure 4447 */ 4448 4449 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, 4450 unsigned char *scsi3addr, struct CommandList *abort) 4451 { 4452 int rc = IO_OK; 4453 struct scsi_cmnd *scmd; /* scsi command within request being aborted */ 4454 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */ 4455 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */ 4456 unsigned char *psa = &phys_scsi3addr[0]; 4457 4458 /* Get a pointer to the hpsa logical device. */ 4459 scmd = (struct scsi_cmnd *) abort->scsi_cmd; 4460 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata); 4461 if (dev == NULL) { 4462 dev_warn(&h->pdev->dev, 4463 "Cannot abort: no device pointer for command.\n"); 4464 return -1; /* not abortable */ 4465 } 4466 4467 if (h->raid_offload_debug > 0) 4468 dev_info(&h->pdev->dev, 4469 "Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4470 h->scsi_host->host_no, dev->bus, dev->target, dev->lun, 4471 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3], 4472 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); 4473 4474 if (!dev->offload_enabled) { 4475 dev_warn(&h->pdev->dev, 4476 "Can't abort: device is not operating in HP SSD Smart Path mode.\n"); 4477 return -1; /* not abortable */ 4478 } 4479 4480 /* Incoming scsi3addr is logical addr. We need physical disk addr. */ 4481 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) { 4482 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n"); 4483 return -1; /* not abortable */ 4484 } 4485 4486 /* send the reset */ 4487 if (h->raid_offload_debug > 0) 4488 dev_info(&h->pdev->dev, 4489 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4490 psa[0], psa[1], psa[2], psa[3], 4491 psa[4], psa[5], psa[6], psa[7]); 4492 rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET); 4493 if (rc != 0) { 4494 dev_warn(&h->pdev->dev, 4495 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4496 psa[0], psa[1], psa[2], psa[3], 4497 psa[4], psa[5], psa[6], psa[7]); 4498 return rc; /* failed to reset */ 4499 } 4500 4501 /* wait for device to recover */ 4502 if (wait_for_device_to_become_ready(h, psa) != 0) { 4503 dev_warn(&h->pdev->dev, 4504 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4505 psa[0], psa[1], psa[2], psa[3], 4506 psa[4], psa[5], psa[6], psa[7]); 4507 return -1; /* failed to recover */ 4508 } 4509 4510 /* device recovered */ 4511 dev_info(&h->pdev->dev, 4512 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4513 psa[0], psa[1], psa[2], psa[3], 4514 psa[4], psa[5], psa[6], psa[7]); 4515 4516 return rc; /* success */ 4517 } 4518 4519 /* Some Smart Arrays need the abort tag swizzled, and some don't. 
It's hard to 4520 * tell which kind we're dealing with, so we send the abort both ways. There 4521 * shouldn't be any collisions between swizzled and unswizzled tags due to the 4522 * way we construct our tags, but we check anyway in case the assumptions which 4523 * make this true someday become false. 4524 */ 4525 static int hpsa_send_abort_both_ways(struct ctlr_info *h, 4526 unsigned char *scsi3addr, struct CommandList *abort) 4527 { 4528 /* I/O accelerator mode 2 commands should be aborted via the 4529 * accelerated path, since the RAID path is unaware of these commands, 4530 * but the underlying firmware can't handle an abort TMF. 4531 * Change the abort to a physical device reset. 4532 */ 4533 if (abort->cmd_type == CMD_IOACCEL2) 4534 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort); 4535 4536 return hpsa_send_abort(h, scsi3addr, abort, 0) && 4537 hpsa_send_abort(h, scsi3addr, abort, 1); 4538 } 4539 4540 /* Send an abort for the specified command. 4541 * If the device and controller support it, 4542 * send a task abort request. 4543 */ 4544 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) 4545 { 4546 4547 int i, rc; 4548 struct ctlr_info *h; 4549 struct hpsa_scsi_dev_t *dev; 4550 struct CommandList *abort; /* pointer to command to be aborted */ 4551 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */ 4552 char msg[256]; /* For debug messaging. */ 4553 int ml = 0; 4554 __le32 tagupper, taglower; 4555 int refcount; 4556 4557 /* Find the controller of the command to be aborted */ 4558 h = sdev_to_hba(sc->device); 4559 if (WARN(h == NULL, 4560 "ABORT REQUEST FAILED, Controller lookup failed.\n")) 4561 return FAILED; 4562 4563 /* Check that controller supports some kind of task abort */ 4564 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) && 4565 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) 4566 return FAILED; 4567 4568 memset(msg, 0, sizeof(msg)); 4569 ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%llu ", 4570 h->scsi_host->host_no, sc->device->channel, 4571 sc->device->id, sc->device->lun); 4572 4573 /* Find the device of the command to be aborted */ 4574 dev = sc->device->hostdata; 4575 if (!dev) { 4576 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n", 4577 msg); 4578 return FAILED; 4579 } 4580 4581 /* Get SCSI command to be aborted */ 4582 abort = (struct CommandList *) sc->host_scribble; 4583 if (abort == NULL) { 4584 /* This can happen if the command already completed. */ 4585 return SUCCESS; 4586 } 4587 refcount = atomic_inc_return(&abort->refcount); 4588 if (refcount == 1) { /* Command is done already. */ 4589 cmd_free(h, abort); 4590 return SUCCESS; 4591 } 4592 hpsa_get_tag(h, abort, &taglower, &tagupper); 4593 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower); 4594 as = (struct scsi_cmnd *) abort->scsi_cmd; 4595 if (as != NULL) 4596 ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ", 4597 as->cmnd[0], as->serial_number); 4598 dev_dbg(&h->pdev->dev, "%s\n", msg); 4599 dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n", 4600 h->scsi_host->host_no, dev->bus, dev->target, dev->lun); 4601 /* 4602 * Command is in flight, or possibly already completed 4603 * by the firmware (but not to the scsi mid layer), but we can't 4604 * distinguish which. Send the abort down.
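 * The refcount poll below tells us afterwards whether the victim
 * actually completed.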
4605 */ 4606 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort); 4607 if (rc != 0) { 4608 dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg); 4609 dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n", 4610 h->scsi_host->host_no, 4611 dev->bus, dev->target, dev->lun); 4612 cmd_free(h, abort); 4613 return FAILED; 4614 } 4615 dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg); 4616 4617 /* If the abort(s) above completed and actually aborted the 4618 * command, then the command to be aborted should already be 4619 * completed. If not, wait around a bit more to see if they 4620 * manage to complete normally. 4621 */ 4622 #define ABORT_COMPLETE_WAIT_SECS 30 4623 for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) { 4624 refcount = atomic_read(&abort->refcount); 4625 if (refcount < 2) { 4626 cmd_free(h, abort); 4627 return SUCCESS; 4628 } else { 4629 msleep(100); 4630 } 4631 } 4632 dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n", 4633 msg, ABORT_COMPLETE_WAIT_SECS); 4634 cmd_free(h, abort); 4635 return FAILED; 4636 } 4637 4638 /* 4639 * For operations that cannot sleep, a command block is allocated at init, 4640 * and managed by cmd_alloc() and cmd_free() using a simple bitmap plus a 4641 * per-command refcount to track which ones are free or in use; no lock is 4642 * needed. cmd_free() is the complement. 4643 */ 4644 4645 static struct CommandList *cmd_alloc(struct ctlr_info *h) 4646 { 4647 struct CommandList *c; 4648 int i; 4649 union u64bit temp64; 4650 dma_addr_t cmd_dma_handle, err_dma_handle; 4651 int refcount; 4652 unsigned long offset; 4653 4654 /* 4655 * There is some *extremely* small but non-zero chance that 4656 * multiple threads could get in here, and one thread could 4657 * be scanning through the list of bits looking for a free 4658 * one, but the free ones are always behind him, and other 4659 * threads sneak in behind him and eat them before he can 4660 * get to them, so that while there is always a free one, a 4661 * very unlucky thread might be starved anyway, never able to 4662 * beat the other threads. In reality, this happens so 4663 * infrequently as to be indistinguishable from never. 4664 */ 4665 4666 offset = h->last_allocation; /* benignly racy */ 4667 for (;;) { 4668 i = find_next_zero_bit(h->cmd_pool_bits, h->nr_cmds, offset); 4669 if (unlikely(i == h->nr_cmds)) { 4670 offset = 0; 4671 continue; 4672 } 4673 c = h->cmd_pool + i; 4674 refcount = atomic_inc_return(&c->refcount); 4675 if (unlikely(refcount > 1)) { 4676 cmd_free(h, c); /* already in use */ 4677 offset = (i + 1) % h->nr_cmds; 4678 continue; 4679 } 4680 set_bit(i & (BITS_PER_LONG - 1), 4681 h->cmd_pool_bits + (i / BITS_PER_LONG)); 4682 break; /* it's ours now.
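 * The refcount taken above excludes everyone else; the bitmap bit is
 * just an allocation hint for other scanners.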
*/ 4683 } 4684 h->last_allocation = i; /* benignly racy */ 4685 4686 /* Zero out all of commandlist except the last field, refcount */ 4687 memset(c, 0, offsetof(struct CommandList, refcount)); 4688 c->Header.tag = cpu_to_le64((u64) (i << DIRECT_LOOKUP_SHIFT)); 4689 cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(*c); 4690 c->err_info = h->errinfo_pool + i; 4691 memset(c->err_info, 0, sizeof(*c->err_info)); 4692 err_dma_handle = h->errinfo_pool_dhandle 4693 + i * sizeof(*c->err_info); 4694 4695 c->cmdindex = i; 4696 4697 c->busaddr = (u32) cmd_dma_handle; 4698 temp64.val = (u64) err_dma_handle; 4699 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle); 4700 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info)); 4701 4702 c->h = h; 4703 return c; 4704 } 4705 4706 static void cmd_free(struct ctlr_info *h, struct CommandList *c) 4707 { 4708 if (atomic_dec_and_test(&c->refcount)) { 4709 int i; 4710 4711 i = c - h->cmd_pool; 4712 clear_bit(i & (BITS_PER_LONG - 1), 4713 h->cmd_pool_bits + (i / BITS_PER_LONG)); 4714 } 4715 } 4716 4717 #ifdef CONFIG_COMPAT 4718 4719 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, 4720 void __user *arg) 4721 { 4722 IOCTL32_Command_struct __user *arg32 = 4723 (IOCTL32_Command_struct __user *) arg; 4724 IOCTL_Command_struct arg64; 4725 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); 4726 int err; 4727 u32 cp; 4728 4729 memset(&arg64, 0, sizeof(arg64)); 4730 err = 0; 4731 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 4732 sizeof(arg64.LUN_info)); 4733 err |= copy_from_user(&arg64.Request, &arg32->Request, 4734 sizeof(arg64.Request)); 4735 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 4736 sizeof(arg64.error_info)); 4737 err |= get_user(arg64.buf_size, &arg32->buf_size); 4738 err |= get_user(cp, &arg32->buf); 4739 arg64.buf = compat_ptr(cp); 4740 err |= copy_to_user(p, &arg64, sizeof(arg64)); 4741 4742 if (err) 4743 return -EFAULT; 4744 4745 err = hpsa_ioctl(dev, CCISS_PASSTHRU, p); 4746 if (err) 4747 return err; 4748 err |= copy_in_user(&arg32->error_info, &p->error_info, 4749 sizeof(arg32->error_info)); 4750 if (err) 4751 return -EFAULT; 4752 return err; 4753 } 4754 4755 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, 4756 int cmd, void __user *arg) 4757 { 4758 BIG_IOCTL32_Command_struct __user *arg32 = 4759 (BIG_IOCTL32_Command_struct __user *) arg; 4760 BIG_IOCTL_Command_struct arg64; 4761 BIG_IOCTL_Command_struct __user *p = 4762 compat_alloc_user_space(sizeof(arg64)); 4763 int err; 4764 u32 cp; 4765 4766 memset(&arg64, 0, sizeof(arg64)); 4767 err = 0; 4768 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 4769 sizeof(arg64.LUN_info)); 4770 err |= copy_from_user(&arg64.Request, &arg32->Request, 4771 sizeof(arg64.Request)); 4772 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 4773 sizeof(arg64.error_info)); 4774 err |= get_user(arg64.buf_size, &arg32->buf_size); 4775 err |= get_user(arg64.malloc_size, &arg32->malloc_size); 4776 err |= get_user(cp, &arg32->buf); 4777 arg64.buf = compat_ptr(cp); 4778 err |= copy_to_user(p, &arg64, sizeof(arg64)); 4779 4780 if (err) 4781 return -EFAULT; 4782 4783 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p); 4784 if (err) 4785 return err; 4786 err |= copy_in_user(&arg32->error_info, &p->error_info, 4787 sizeof(arg32->error_info)); 4788 if (err) 4789 return -EFAULT; 4790 return err; 4791 } 4792 4793 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg) 4794 { 4795 switch (cmd) { 4796 case 
CCISS_GETPCIINFO: 4797 case CCISS_GETINTINFO: 4798 case CCISS_SETINTINFO: 4799 case CCISS_GETNODENAME: 4800 case CCISS_SETNODENAME: 4801 case CCISS_GETHEARTBEAT: 4802 case CCISS_GETBUSTYPES: 4803 case CCISS_GETFIRMVER: 4804 case CCISS_GETDRIVVER: 4805 case CCISS_REVALIDVOLS: 4806 case CCISS_DEREGDISK: 4807 case CCISS_REGNEWDISK: 4808 case CCISS_REGNEWD: 4809 case CCISS_RESCANDISK: 4810 case CCISS_GETLUNINFO: 4811 return hpsa_ioctl(dev, cmd, arg); 4812 4813 case CCISS_PASSTHRU32: 4814 return hpsa_ioctl32_passthru(dev, cmd, arg); 4815 case CCISS_BIG_PASSTHRU32: 4816 return hpsa_ioctl32_big_passthru(dev, cmd, arg); 4817 4818 default: 4819 return -ENOIOCTLCMD; 4820 } 4821 } 4822 #endif 4823 4824 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) 4825 { 4826 struct hpsa_pci_info pciinfo; 4827 4828 if (!argp) 4829 return -EINVAL; 4830 pciinfo.domain = pci_domain_nr(h->pdev->bus); 4831 pciinfo.bus = h->pdev->bus->number; 4832 pciinfo.dev_fn = h->pdev->devfn; 4833 pciinfo.board_id = h->board_id; 4834 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo))) 4835 return -EFAULT; 4836 return 0; 4837 } 4838 4839 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) 4840 { 4841 DriverVer_type DriverVer; 4842 unsigned char vmaj, vmin, vsubmin; 4843 int rc; 4844 4845 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu", 4846 &vmaj, &vmin, &vsubmin); 4847 if (rc != 3) { 4848 dev_info(&h->pdev->dev, "driver version string '%s' " 4849 "unrecognized.\n", HPSA_DRIVER_VERSION); 4850 vmaj = 0; 4851 vmin = 0; 4852 vsubmin = 0; 4853 } 4854 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin; 4855 if (!argp) 4856 return -EINVAL; 4857 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type))) 4858 return -EFAULT; 4859 return 0; 4860 } 4861 4862 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) 4863 { 4864 IOCTL_Command_struct iocommand; 4865 struct CommandList *c; 4866 char *buff = NULL; 4867 u64 temp64; 4868 int rc = 0; 4869 4870 if (!argp) 4871 return -EINVAL; 4872 if (!capable(CAP_SYS_RAWIO)) 4873 return -EPERM; 4874 if (copy_from_user(&iocommand, argp, sizeof(iocommand))) 4875 return -EFAULT; 4876 if ((iocommand.buf_size < 1) && 4877 (iocommand.Request.Type.Direction != XFER_NONE)) { 4878 return -EINVAL; 4879 } 4880 if (iocommand.buf_size > 0) { 4881 buff = kmalloc(iocommand.buf_size, GFP_KERNEL); 4882 if (buff == NULL) 4883 return -ENOMEM; 4884 if (iocommand.Request.Type.Direction & XFER_WRITE) { 4885 /* Copy the data into the buffer we created */ 4886 if (copy_from_user(buff, iocommand.buf, 4887 iocommand.buf_size)) { 4888 rc = -EFAULT; 4889 goto out_kfree; 4890 } 4891 } else { 4892 memset(buff, 0, iocommand.buf_size); 4893 } 4894 } 4895 c = cmd_alloc(h); 4896 if (c == NULL) { 4897 rc = -ENOMEM; 4898 goto out_kfree; 4899 } 4900 /* Fill in the command type */ 4901 c->cmd_type = CMD_IOCTL_PEND; 4902 /* Fill in Command Header */ 4903 c->Header.ReplyQueue = 0; /* unused in simple mode */ 4904 if (iocommand.buf_size > 0) { /* buffer to fill */ 4905 c->Header.SGList = 1; 4906 c->Header.SGTotal = cpu_to_le16(1); 4907 } else { /* no buffers to fill */ 4908 c->Header.SGList = 0; 4909 c->Header.SGTotal = cpu_to_le16(0); 4910 } 4911 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); 4912 4913 /* Fill in Request block */ 4914 memcpy(&c->Request, &iocommand.Request, 4915 sizeof(c->Request)); 4916 4917 /* Fill in the scatter gather information */ 4918 if (iocommand.buf_size > 0) { 4919 temp64 = pci_map_single(h->pdev, buff, 4920 iocommand.buf_size,
PCI_DMA_BIDIRECTIONAL); 4921 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) { 4922 c->SG[0].Addr = cpu_to_le64(0); 4923 c->SG[0].Len = cpu_to_le32(0); 4924 rc = -ENOMEM; 4925 goto out; 4926 } 4927 c->SG[0].Addr = cpu_to_le64(temp64); 4928 c->SG[0].Len = cpu_to_le32(iocommand.buf_size); 4929 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ 4930 } 4931 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); 4932 if (iocommand.buf_size > 0) 4933 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); 4934 check_ioctl_unit_attention(h, c); 4935 4936 /* Copy the error information out */ 4937 memcpy(&iocommand.error_info, c->err_info, 4938 sizeof(iocommand.error_info)); 4939 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) { 4940 rc = -EFAULT; 4941 goto out; 4942 } 4943 if ((iocommand.Request.Type.Direction & XFER_READ) && 4944 iocommand.buf_size > 0) { 4945 /* Copy the data out of the buffer we created */ 4946 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { 4947 rc = -EFAULT; 4948 goto out; 4949 } 4950 } 4951 out: 4952 cmd_free(h, c); 4953 out_kfree: 4954 kfree(buff); 4955 return rc; 4956 } 4957 4958 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) 4959 { 4960 BIG_IOCTL_Command_struct *ioc; 4961 struct CommandList *c; 4962 unsigned char **buff = NULL; 4963 int *buff_size = NULL; 4964 u64 temp64; 4965 BYTE sg_used = 0; 4966 int status = 0; 4967 u32 left; 4968 u32 sz; 4969 BYTE __user *data_ptr; 4970 4971 if (!argp) 4972 return -EINVAL; 4973 if (!capable(CAP_SYS_RAWIO)) 4974 return -EPERM; 4975 ioc = (BIG_IOCTL_Command_struct *) 4976 kmalloc(sizeof(*ioc), GFP_KERNEL); 4977 if (!ioc) { 4978 status = -ENOMEM; 4979 goto cleanup1; 4980 } 4981 if (copy_from_user(ioc, argp, sizeof(*ioc))) { 4982 status = -EFAULT; 4983 goto cleanup1; 4984 } 4985 if ((ioc->buf_size < 1) && 4986 (ioc->Request.Type.Direction != XFER_NONE)) { 4987 status = -EINVAL; 4988 goto cleanup1; 4989 } 4990 /* Check kmalloc limits using all SGs */ 4991 if (ioc->malloc_size > MAX_KMALLOC_SIZE) { 4992 status = -EINVAL; 4993 goto cleanup1; 4994 } 4995 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) { 4996 status = -EINVAL; 4997 goto cleanup1; 4998 } 4999 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL); 5000 if (!buff) { 5001 status = -ENOMEM; 5002 goto cleanup1; 5003 } 5004 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL); 5005 if (!buff_size) { 5006 status = -ENOMEM; 5007 goto cleanup1; 5008 } 5009 left = ioc->buf_size; 5010 data_ptr = ioc->buf; 5011 while (left) { 5012 sz = (left > ioc->malloc_size) ? 
ioc->malloc_size : left; 5013 buff_size[sg_used] = sz; 5014 buff[sg_used] = kmalloc(sz, GFP_KERNEL); 5015 if (buff[sg_used] == NULL) { 5016 status = -ENOMEM; 5017 goto cleanup1; 5018 } 5019 if (ioc->Request.Type.Direction & XFER_WRITE) { 5020 if (copy_from_user(buff[sg_used], data_ptr, sz)) { 5021 status = -EFAULT; 5022 goto cleanup1; 5023 } 5024 } else 5025 memset(buff[sg_used], 0, sz); 5026 left -= sz; 5027 data_ptr += sz; 5028 sg_used++; 5029 } 5030 c = cmd_alloc(h); 5031 if (c == NULL) { 5032 status = -ENOMEM; 5033 goto cleanup1; 5034 } 5035 c->cmd_type = CMD_IOCTL_PEND; 5036 c->Header.ReplyQueue = 0; 5037 c->Header.SGList = (u8) sg_used; 5038 c->Header.SGTotal = cpu_to_le16(sg_used); 5039 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); 5040 memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); 5041 if (ioc->buf_size > 0) { 5042 int i; 5043 for (i = 0; i < sg_used; i++) { 5044 temp64 = pci_map_single(h->pdev, buff[i], 5045 buff_size[i], PCI_DMA_BIDIRECTIONAL); 5046 if (dma_mapping_error(&h->pdev->dev, 5047 (dma_addr_t) temp64)) { 5048 c->SG[i].Addr = cpu_to_le64(0); 5049 c->SG[i].Len = cpu_to_le32(0); 5050 hpsa_pci_unmap(h->pdev, c, i, 5051 PCI_DMA_BIDIRECTIONAL); 5052 status = -ENOMEM; 5053 goto cleanup0; 5054 } 5055 c->SG[i].Addr = cpu_to_le64(temp64); 5056 c->SG[i].Len = cpu_to_le32(buff_size[i]); 5057 c->SG[i].Ext = cpu_to_le32(0); 5058 } 5059 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST); 5060 } 5061 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); 5062 if (sg_used) 5063 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); 5064 check_ioctl_unit_attention(h, c); 5065 /* Copy the error information out */ 5066 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); 5067 if (copy_to_user(argp, ioc, sizeof(*ioc))) { 5068 status = -EFAULT; 5069 goto cleanup0; 5070 } 5071 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) { 5072 int i; 5073 5074 /* Copy the data out of the buffer we created */ 5075 BYTE __user *ptr = ioc->buf; 5076 for (i = 0; i < sg_used; i++) { 5077 if (copy_to_user(ptr, buff[i], buff_size[i])) { 5078 status = -EFAULT; 5079 goto cleanup0; 5080 } 5081 ptr += buff_size[i]; 5082 } 5083 } 5084 status = 0; 5085 cleanup0: 5086 cmd_free(h, c); 5087 cleanup1: 5088 if (buff) { 5089 int i; 5090 5091 for (i = 0; i < sg_used; i++) 5092 kfree(buff[i]); 5093 kfree(buff); 5094 } 5095 kfree(buff_size); 5096 kfree(ioc); 5097 return status; 5098 } 5099 5100 static void check_ioctl_unit_attention(struct ctlr_info *h, 5101 struct CommandList *c) 5102 { 5103 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 5104 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) 5105 (void) check_for_unit_attention(h, c); 5106 } 5107 5108 static int increment_passthru_count(struct ctlr_info *h) 5109 { 5110 unsigned long flags; 5111 5112 spin_lock_irqsave(&h->passthru_count_lock, flags); 5113 if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) { 5114 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 5115 return -1; 5116 } 5117 h->passthru_count++; 5118 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 5119 return 0; 5120 } 5121 5122 static void decrement_passthru_count(struct ctlr_info *h) 5123 { 5124 unsigned long flags; 5125 5126 spin_lock_irqsave(&h->passthru_count_lock, flags); 5127 if (h->passthru_count <= 0) { 5128 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 5129 /* not expecting to get here. 
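 * An unbalanced decrement would mean a passthru path returned without
 * pairing its increment.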
*/ 5130 dev_warn(&h->pdev->dev, "Bug detected, passthru_count seems to be incorrect.\n"); 5131 return; 5132 } 5133 h->passthru_count--; 5134 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 5135 } 5136 5137 /* 5138 * ioctl 5139 */ 5140 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg) 5141 { 5142 struct ctlr_info *h; 5143 void __user *argp = (void __user *)arg; 5144 int rc; 5145 5146 h = sdev_to_hba(dev); 5147 5148 switch (cmd) { 5149 case CCISS_DEREGDISK: 5150 case CCISS_REGNEWDISK: 5151 case CCISS_REGNEWD: 5152 hpsa_scan_start(h->scsi_host); 5153 return 0; 5154 case CCISS_GETPCIINFO: 5155 return hpsa_getpciinfo_ioctl(h, argp); 5156 case CCISS_GETDRIVVER: 5157 return hpsa_getdrivver_ioctl(h, argp); 5158 case CCISS_PASSTHRU: 5159 if (increment_passthru_count(h)) 5160 return -EAGAIN; 5161 rc = hpsa_passthru_ioctl(h, argp); 5162 decrement_passthru_count(h); 5163 return rc; 5164 case CCISS_BIG_PASSTHRU: 5165 if (increment_passthru_count(h)) 5166 return -EAGAIN; 5167 rc = hpsa_big_passthru_ioctl(h, argp); 5168 decrement_passthru_count(h); 5169 return rc; 5170 default: 5171 return -ENOTTY; 5172 } 5173 } 5174 5175 static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, 5176 u8 reset_type) 5177 { 5178 struct CommandList *c; 5179 5180 c = cmd_alloc(h); 5181 if (!c) 5182 return -ENOMEM; 5183 /* fill_cmd can't fail here, no data buffer to map */ 5184 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, 5185 RAID_CTLR_LUNID, TYPE_MSG); 5186 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */ 5187 c->waiting = NULL; 5188 enqueue_cmd_and_start_io(h, c); 5189 /* Don't wait for completion, the reset won't complete. Don't free 5190 * the command either. This is the last command we will send before 5191 * re-initializing everything, so it doesn't matter and won't leak. 5192 */ 5193 return 0; 5194 } 5195 5196 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, 5197 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, 5198 int cmd_type) 5199 { 5200 int pci_dir = XFER_NONE; 5201 struct CommandList *a; /* for commands to be aborted */ 5202 5203 c->cmd_type = CMD_IOCTL_PEND; 5204 c->Header.ReplyQueue = 0; 5205 if (buff != NULL && size > 0) { 5206 c->Header.SGList = 1; 5207 c->Header.SGTotal = cpu_to_le16(1); 5208 } else { 5209 c->Header.SGList = 0; 5210 c->Header.SGTotal = cpu_to_le16(0); 5211 } 5212 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); 5213 5214 if (cmd_type == TYPE_CMD) { 5215 switch (cmd) { 5216 case HPSA_INQUIRY: 5217 /* Are we trying to read a vital product data page? */ 5218 if (page_code & VPD_PAGE) { 5219 c->Request.CDB[1] = 0x01; 5220 c->Request.CDB[2] = (page_code & 0xff); 5221 } 5222 c->Request.CDBLen = 6; 5223 c->Request.type_attr_dir = 5224 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 5225 c->Request.Timeout = 0; 5226 c->Request.CDB[0] = HPSA_INQUIRY; 5227 c->Request.CDB[4] = size & 0xFF; 5228 break; 5229 case HPSA_REPORT_LOG: 5230 case HPSA_REPORT_PHYS: 5231 /* Talking to the controller, so it's a physical command: 5232 mode = 00, target = 0. Nothing to write.
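 * The reply buffer size goes in CDB bytes 6-9, MSB first, as set below.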
5233 */ 5234 c->Request.CDBLen = 12; 5235 c->Request.type_attr_dir = 5236 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 5237 c->Request.Timeout = 0; 5238 c->Request.CDB[0] = cmd; 5239 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ 5240 c->Request.CDB[7] = (size >> 16) & 0xFF; 5241 c->Request.CDB[8] = (size >> 8) & 0xFF; 5242 c->Request.CDB[9] = size & 0xFF; 5243 break; 5244 case HPSA_CACHE_FLUSH: 5245 c->Request.CDBLen = 12; 5246 c->Request.type_attr_dir = 5247 TYPE_ATTR_DIR(cmd_type, 5248 ATTR_SIMPLE, XFER_WRITE); 5249 c->Request.Timeout = 0; 5250 c->Request.CDB[0] = BMIC_WRITE; 5251 c->Request.CDB[6] = BMIC_CACHE_FLUSH; 5252 c->Request.CDB[7] = (size >> 8) & 0xFF; 5253 c->Request.CDB[8] = size & 0xFF; 5254 break; 5255 case TEST_UNIT_READY: 5256 c->Request.CDBLen = 6; 5257 c->Request.type_attr_dir = 5258 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); 5259 c->Request.Timeout = 0; 5260 break; 5261 case HPSA_GET_RAID_MAP: 5262 c->Request.CDBLen = 12; 5263 c->Request.type_attr_dir = 5264 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 5265 c->Request.Timeout = 0; 5266 c->Request.CDB[0] = HPSA_CISS_READ; 5267 c->Request.CDB[1] = cmd; 5268 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ 5269 c->Request.CDB[7] = (size >> 16) & 0xFF; 5270 c->Request.CDB[8] = (size >> 8) & 0xFF; 5271 c->Request.CDB[9] = size & 0xFF; 5272 break; 5273 case BMIC_SENSE_CONTROLLER_PARAMETERS: 5274 c->Request.CDBLen = 10; 5275 c->Request.type_attr_dir = 5276 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 5277 c->Request.Timeout = 0; 5278 c->Request.CDB[0] = BMIC_READ; 5279 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS; 5280 c->Request.CDB[7] = (size >> 16) & 0xFF; 5281 c->Request.CDB[8] = (size >> 8) & 0xFF; 5282 break; 5283 case BMIC_IDENTIFY_PHYSICAL_DEVICE: 5284 c->Request.CDBLen = 10; 5285 c->Request.type_attr_dir = 5286 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); 5287 c->Request.Timeout = 0; 5288 c->Request.CDB[0] = BMIC_READ; 5289 c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE; 5290 c->Request.CDB[7] = (size >> 16) & 0xFF; 5291 c->Request.CDB[8] = (size >> 8) & 0xFF; 5292 break; 5293 default: 5294 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd); 5295 BUG(); 5296 return -1; 5297 } 5298 } else if (cmd_type == TYPE_MSG) { 5299 switch (cmd) { 5300 5301 case HPSA_DEVICE_RESET_MSG: 5302 c->Request.CDBLen = 16; 5303 c->Request.type_attr_dir = 5304 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); 5305 c->Request.Timeout = 0; /* Don't time out */ 5306 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); 5307 c->Request.CDB[0] = cmd; 5308 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN; 5309 /* If bytes 4-7 are zero, it means reset the 5310 * LunID device. */ 5311 c->Request.CDB[4] = 0x00; 5312 c->Request.CDB[5] = 0x00; 5313 c->Request.CDB[6] = 0x00; 5314 c->Request.CDB[7] = 0x00; 5315 break; 5316 case HPSA_ABORT_MSG: 5317 a = buff; /* point to command to be aborted */ 5318 dev_dbg(&h->pdev->dev, 5319 "Abort Tag:0x%016llx request Tag:0x%016llx\n", 5320 a->Header.tag, c->Header.tag); 5321 c->Request.CDBLen = 16; 5322 c->Request.type_attr_dir = 5323 TYPE_ATTR_DIR(cmd_type, 5324 ATTR_SIMPLE, XFER_WRITE); 5325 c->Request.Timeout = 0; /* Don't time out */ 5326 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT; 5327 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK; 5328 c->Request.CDB[2] = 0x00; /* reserved */ 5329 c->Request.CDB[3] = 0x00; /* reserved */ 5330 /* Tag to abort goes in CDB[4]-CDB[11] */ 5331 memcpy(&c->Request.CDB[4], &a->Header.tag, 5332 sizeof(a->Header.tag)); 5333 c->Request.CDB[12] = 0x00; /*
reserved */ 5334 c->Request.CDB[13] = 0x00; /* reserved */ 5335 c->Request.CDB[14] = 0x00; /* reserved */ 5336 c->Request.CDB[15] = 0x00; /* reserved */ 5337 break; 5338 default: 5339 dev_warn(&h->pdev->dev, "unknown message type %d\n", 5340 cmd); 5341 BUG(); 5342 } 5343 } else { 5344 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); 5345 BUG(); 5346 } 5347 5348 switch (GET_DIR(c->Request.type_attr_dir)) { 5349 case XFER_READ: 5350 pci_dir = PCI_DMA_FROMDEVICE; 5351 break; 5352 case XFER_WRITE: 5353 pci_dir = PCI_DMA_TODEVICE; 5354 break; 5355 case XFER_NONE: 5356 pci_dir = PCI_DMA_NONE; 5357 break; 5358 default: 5359 pci_dir = PCI_DMA_BIDIRECTIONAL; 5360 } 5361 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir)) 5362 return -1; 5363 return 0; 5364 } 5365 5366 /* 5367 * Map (physical) PCI mem into (virtual) kernel space 5368 */ 5369 static void __iomem *remap_pci_mem(ulong base, ulong size) 5370 { 5371 ulong page_base = ((ulong) base) & PAGE_MASK; 5372 ulong page_offs = ((ulong) base) - page_base; 5373 void __iomem *page_remapped = ioremap_nocache(page_base, 5374 page_offs + size); 5375 5376 return page_remapped ? (page_remapped + page_offs) : NULL; 5377 } 5378 5379 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q) 5380 { 5381 return h->access.command_completed(h, q); 5382 } 5383 5384 static inline bool interrupt_pending(struct ctlr_info *h) 5385 { 5386 return h->access.intr_pending(h); 5387 } 5388 5389 static inline long interrupt_not_for_us(struct ctlr_info *h) 5390 { 5391 return (h->access.intr_pending(h) == 0) || 5392 (h->interrupts_enabled == 0); 5393 } 5394 5395 static inline int bad_tag(struct ctlr_info *h, u32 tag_index, 5396 u32 raw_tag) 5397 { 5398 if (unlikely(tag_index >= h->nr_cmds)) { 5399 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); 5400 return 1; 5401 } 5402 return 0; 5403 } 5404 5405 static inline void finish_cmd(struct CommandList *c) 5406 { 5407 dial_up_lockup_detection_on_fw_flash_complete(c->h, c); 5408 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI 5409 || c->cmd_type == CMD_IOACCEL2)) 5410 complete_scsi_command(c); 5411 else if (c->cmd_type == CMD_IOCTL_PEND) 5412 complete(c->waiting); 5413 } 5414 5415 5416 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag) 5417 { 5418 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1) 5419 #define HPSA_SIMPLE_ERROR_BITS 0x03 5420 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) 5421 return tag & ~HPSA_SIMPLE_ERROR_BITS; 5422 return tag & ~HPSA_PERF_ERROR_BITS; 5423 } 5424 5425 /* process completion of an indexed ("direct lookup") command */ 5426 static inline void process_indexed_cmd(struct ctlr_info *h, 5427 u32 raw_tag) 5428 { 5429 u32 tag_index; 5430 struct CommandList *c; 5431 5432 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT; 5433 if (!bad_tag(h, tag_index, raw_tag)) { 5434 c = h->cmd_pool + tag_index; 5435 finish_cmd(c); 5436 } 5437 } 5438 5439 /* Some controllers, like p400, will give us one interrupt 5440 * after a soft reset, even if we turned interrupts off. 5441 * Only need to check for this in the hpsa_xxx_discard_completions 5442 * functions. 5443 */ 5444 static int ignore_bogus_interrupt(struct ctlr_info *h) 5445 { 5446 if (likely(!reset_devices)) 5447 return 0; 5448 5449 if (likely(h->interrupts_enabled)) 5450 return 0; 5451 5452 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " 5453 "(known firmware bug.) 
Ignoring.\n"); 5454 5455 return 1; 5456 } 5457 5458 /* 5459 * Convert &h->q[x] (passed to interrupt handlers) back to h. 5460 * Relies on (h->q[x] == x) being true for x such that 5461 * 0 <= x < MAX_REPLY_QUEUES. 5462 */ 5463 static struct ctlr_info *queue_to_hba(u8 *queue) 5464 { 5465 return container_of((queue - *queue), struct ctlr_info, q[0]); 5466 } 5467 5468 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue) 5469 { 5470 struct ctlr_info *h = queue_to_hba(queue); 5471 u8 q = *(u8 *) queue; 5472 u32 raw_tag; 5473 5474 if (ignore_bogus_interrupt(h)) 5475 return IRQ_NONE; 5476 5477 if (interrupt_not_for_us(h)) 5478 return IRQ_NONE; 5479 h->last_intr_timestamp = get_jiffies_64(); 5480 while (interrupt_pending(h)) { 5481 raw_tag = get_next_completion(h, q); 5482 while (raw_tag != FIFO_EMPTY) 5483 raw_tag = next_command(h, q); 5484 } 5485 return IRQ_HANDLED; 5486 } 5487 5488 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue) 5489 { 5490 struct ctlr_info *h = queue_to_hba(queue); 5491 u32 raw_tag; 5492 u8 q = *(u8 *) queue; 5493 5494 if (ignore_bogus_interrupt(h)) 5495 return IRQ_NONE; 5496 5497 h->last_intr_timestamp = get_jiffies_64(); 5498 raw_tag = get_next_completion(h, q); 5499 while (raw_tag != FIFO_EMPTY) 5500 raw_tag = next_command(h, q); 5501 return IRQ_HANDLED; 5502 } 5503 5504 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue) 5505 { 5506 struct ctlr_info *h = queue_to_hba((u8 *) queue); 5507 u32 raw_tag; 5508 u8 q = *(u8 *) queue; 5509 5510 if (interrupt_not_for_us(h)) 5511 return IRQ_NONE; 5512 h->last_intr_timestamp = get_jiffies_64(); 5513 while (interrupt_pending(h)) { 5514 raw_tag = get_next_completion(h, q); 5515 while (raw_tag != FIFO_EMPTY) { 5516 process_indexed_cmd(h, raw_tag); 5517 raw_tag = next_command(h, q); 5518 } 5519 } 5520 return IRQ_HANDLED; 5521 } 5522 5523 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue) 5524 { 5525 struct ctlr_info *h = queue_to_hba(queue); 5526 u32 raw_tag; 5527 u8 q = *(u8 *) queue; 5528 5529 h->last_intr_timestamp = get_jiffies_64(); 5530 raw_tag = get_next_completion(h, q); 5531 while (raw_tag != FIFO_EMPTY) { 5532 process_indexed_cmd(h, raw_tag); 5533 raw_tag = next_command(h, q); 5534 } 5535 return IRQ_HANDLED; 5536 } 5537 5538 /* Send a message CDB to the firmware. Careful, this only works 5539 * in simple mode, not performant mode due to the tag lookup. 5540 * We only ever use this immediately after a controller reset. 5541 */ 5542 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, 5543 unsigned char type) 5544 { 5545 struct Command { 5546 struct CommandListHeader CommandHeader; 5547 struct RequestBlock Request; 5548 struct ErrDescriptor ErrorDescriptor; 5549 }; 5550 struct Command *cmd; 5551 static const size_t cmd_sz = sizeof(*cmd) + 5552 sizeof(cmd->ErrorDescriptor); 5553 dma_addr_t paddr64; 5554 __le32 paddr32; 5555 u32 tag; 5556 void __iomem *vaddr; 5557 int i, err; 5558 5559 vaddr = pci_ioremap_bar(pdev, 0); 5560 if (vaddr == NULL) 5561 return -ENOMEM; 5562 5563 /* The Inbound Post Queue only accepts 32-bit physical addresses for the 5564 * CCISS commands, so they must be allocated from the lower 4GiB of 5565 * memory. 5566 */ 5567 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 5568 if (err) { 5569 iounmap(vaddr); 5570 return err; 5571 } 5572 5573 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); 5574 if (cmd == NULL) { 5575 iounmap(vaddr); 5576 return -ENOMEM; 5577 } 5578 5579 /* This must fit, because of the 32-bit consistent DMA mask.
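(pci_alloc_consistent() above honored that mask, so the upper dword of paddr64 is zero.)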
Also, 5580 * although there's no guarantee, we assume that the address is at 5581 * least 4-byte aligned (most likely, it's page-aligned). 5582 */ 5583 paddr32 = cpu_to_le32(paddr64); 5584 5585 cmd->CommandHeader.ReplyQueue = 0; 5586 cmd->CommandHeader.SGList = 0; 5587 cmd->CommandHeader.SGTotal = cpu_to_le16(0); 5588 cmd->CommandHeader.tag = cpu_to_le64(paddr64); 5589 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); 5590 5591 cmd->Request.CDBLen = 16; 5592 cmd->Request.type_attr_dir = 5593 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE); 5594 cmd->Request.Timeout = 0; /* Don't time out */ 5595 cmd->Request.CDB[0] = opcode; 5596 cmd->Request.CDB[1] = type; 5597 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ 5598 cmd->ErrorDescriptor.Addr = 5599 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd))); 5600 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo)); 5601 5602 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET); 5603 5604 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { 5605 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); 5606 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64) 5607 break; 5608 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); 5609 } 5610 5611 iounmap(vaddr); 5612 5613 /* we leak the DMA buffer here ... no choice since the controller could 5614 * still complete the command. 5615 */ 5616 if (i == HPSA_MSG_SEND_RETRY_LIMIT) { 5617 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n", 5618 opcode, type); 5619 return -ETIMEDOUT; 5620 } 5621 5622 pci_free_consistent(pdev, cmd_sz, cmd, paddr64); 5623 5624 if (tag & HPSA_ERROR_BIT) { 5625 dev_err(&pdev->dev, "controller message %02x:%02x failed\n", 5626 opcode, type); 5627 return -EIO; 5628 } 5629 5630 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", 5631 opcode, type); 5632 return 0; 5633 } 5634 5635 #define hpsa_noop(p) hpsa_message(p, 3, 0) 5636 5637 static int hpsa_controller_hard_reset(struct pci_dev *pdev, 5638 void __iomem *vaddr, u32 use_doorbell) 5639 { 5640 5641 if (use_doorbell) { 5642 /* For everything after the P600, the PCI power state method 5643 * of resetting the controller doesn't work, so we have this 5644 * other way using the doorbell register. 5645 */ 5646 dev_info(&pdev->dev, "using doorbell to reset controller\n"); 5647 writel(use_doorbell, vaddr + SA5_DOORBELL); 5648 5649 /* PMC hardware guys tell us we need a 10 second delay after 5650 * doorbell reset and before any attempt to talk to the board 5651 * at all to ensure that this actually works and doesn't fall 5652 * over in some weird corner cases. 5653 */ 5654 msleep(10000); 5655 } else { /* Try to do it the PCI power state way */ 5656 5657 /* Quoting from the Open CISS Specification: "The Power 5658 * Management Control/Status Register (CSR) controls the power 5659 * state of the device. The normal operating state is D0, 5660 * CSR=00h. The software off state is D3, CSR=03h. To reset 5661 * the controller, place the interface device in D3 then to D0, 5662 * this causes a secondary PCI reset which will reset the 5663 * controller." */ 5664 5665 int rc = 0; 5666 5667 dev_info(&pdev->dev, "using PCI PM to reset controller\n"); 5668 5669 /* enter the D3hot power management state */ 5670 rc = pci_set_power_state(pdev, PCI_D3hot); 5671 if (rc) 5672 return rc; 5673 5674 msleep(500); 5675 5676 /* enter the D0 power management state */ 5677 rc = pci_set_power_state(pdev, PCI_D0); 5678 if (rc) 5679 return rc; 5680 5681 /* 5682 * The P600 requires a small delay when changing states. 
* Otherwise we may think the board did not reset and we bail. 5684 * This is for kdump only and is particular to the P600. 5685 */ 5686 msleep(500); 5687 } 5688 return 0; 5689 } 5690 5691 static void init_driver_version(char *driver_version, int len) 5692 { 5693 memset(driver_version, 0, len); 5694 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1); 5695 } 5696 5697 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable) 5698 { 5699 char *driver_version; 5700 int i, size = sizeof(cfgtable->driver_version); 5701 5702 driver_version = kmalloc(size, GFP_KERNEL); 5703 if (!driver_version) 5704 return -ENOMEM; 5705 5706 init_driver_version(driver_version, size); 5707 for (i = 0; i < size; i++) 5708 writeb(driver_version[i], &cfgtable->driver_version[i]); 5709 kfree(driver_version); 5710 return 0; 5711 } 5712 5713 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable, 5714 unsigned char *driver_ver) 5715 { 5716 int i; 5717 5718 for (i = 0; i < sizeof(cfgtable->driver_version); i++) 5719 driver_ver[i] = readb(&cfgtable->driver_version[i]); 5720 } 5721 5722 static int controller_reset_failed(struct CfgTable __iomem *cfgtable) 5723 { 5724 5725 char *driver_ver, *old_driver_ver; 5726 int rc, size = sizeof(cfgtable->driver_version); 5727 5728 old_driver_ver = kmalloc(2 * size, GFP_KERNEL); 5729 if (!old_driver_ver) 5730 return -ENOMEM; 5731 driver_ver = old_driver_ver + size; 5732 5733 /* After a reset, the 32 bytes of "driver version" in the cfgtable 5734 * should have been changed, otherwise we know the reset failed. 5735 */ 5736 init_driver_version(old_driver_ver, size); 5737 read_driver_ver_from_cfgtable(cfgtable, driver_ver); 5738 rc = !memcmp(driver_ver, old_driver_ver, size); 5739 kfree(old_driver_ver); 5740 return rc; 5741 } 5742 /* This does a hard reset of the controller using PCI power management 5743 * states or the doorbell register. 5744 */ 5745 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) 5746 { 5747 u64 cfg_offset; 5748 u32 cfg_base_addr; 5749 u64 cfg_base_addr_index; 5750 void __iomem *vaddr; 5751 unsigned long paddr; 5752 u32 misc_fw_support; 5753 int rc; 5754 struct CfgTable __iomem *cfgtable; 5755 u32 use_doorbell; 5756 u32 board_id; 5757 u16 command_register; 5758 5759 /* For controllers as old as the P600, this is very nearly 5760 * the same thing as 5761 * 5762 * pci_save_state(pci_dev); 5763 * pci_set_power_state(pci_dev, PCI_D3hot); 5764 * pci_set_power_state(pci_dev, PCI_D0); 5765 * pci_restore_state(pci_dev); 5766 * 5767 * For controllers newer than the P600, the pci power state 5768 * method of resetting doesn't work so we have another way 5769 * using the doorbell register. 5770 */ 5771 5772 rc = hpsa_lookup_board_id(pdev, &board_id); 5773 if (rc < 0) { 5774 dev_warn(&pdev->dev, "Board ID not found\n"); 5775 return rc; 5776 } 5777 if (!ctlr_is_resettable(board_id)) { 5778 dev_warn(&pdev->dev, "Controller not resettable\n"); 5779 return -ENODEV; 5780 } 5781 5782 /* if controller is soft- but not hard resettable... */ 5783 if (!ctlr_is_hard_resettable(board_id)) 5784 return -ENOTSUPP; /* try soft reset later.
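hpsa_init_one() treats this -ENOTSUPP as a cue to retry with a soft reset once the controller is configured.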
*/ 5785 5786 /* Save the PCI command register */ 5787 pci_read_config_word(pdev, 4, &command_register); 5788 pci_save_state(pdev); 5789 5790 /* find the first memory BAR, so we can find the cfg table */ 5791 rc = hpsa_pci_find_memory_BAR(pdev, &paddr); 5792 if (rc) 5793 return rc; 5794 vaddr = remap_pci_mem(paddr, 0x250); 5795 if (!vaddr) 5796 return -ENOMEM; 5797 5798 /* find cfgtable in order to check if reset via doorbell is supported */ 5799 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, 5800 &cfg_base_addr_index, &cfg_offset); 5801 if (rc) 5802 goto unmap_vaddr; 5803 cfgtable = remap_pci_mem(pci_resource_start(pdev, 5804 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable)); 5805 if (!cfgtable) { 5806 rc = -ENOMEM; 5807 goto unmap_vaddr; 5808 } 5809 rc = write_driver_ver_to_cfgtable(cfgtable); 5810 if (rc) 5811 goto unmap_cfgtable; 5812 5813 /* If reset via doorbell register is supported, use that. 5814 * There are two such methods. Favor the newest method. 5815 */ 5816 misc_fw_support = readl(&cfgtable->misc_fw_support); 5817 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2; 5818 if (use_doorbell) { 5819 use_doorbell = DOORBELL_CTLR_RESET2; 5820 } else { 5821 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; 5822 if (use_doorbell) { 5823 dev_warn(&pdev->dev, 5824 "Soft reset not supported. Firmware update is required.\n"); 5825 rc = -ENOTSUPP; /* try soft reset */ 5826 goto unmap_cfgtable; 5827 } 5828 } 5829 5830 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); 5831 if (rc) 5832 goto unmap_cfgtable; 5833 5834 pci_restore_state(pdev); 5835 pci_write_config_word(pdev, 4, command_register); 5836 5837 /* Some devices (notably the HP Smart Array 5i Controller) 5838 need a little pause here */ 5839 msleep(HPSA_POST_RESET_PAUSE_MSECS); 5840 5841 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); 5842 if (rc) { 5843 dev_warn(&pdev->dev, 5844 "Failed waiting for board to become ready after hard reset\n"); 5845 goto unmap_cfgtable; 5846 } 5847 5848 rc = controller_reset_failed(cfgtable); 5849 if (rc < 0) 5850 goto unmap_cfgtable; 5851 if (rc) { 5852 dev_warn(&pdev->dev, "Unable to successfully reset " 5853 "controller. Will try soft reset.\n"); 5854 rc = -ENOTSUPP; 5855 } else { 5856 dev_info(&pdev->dev, "board ready after hard reset.\n"); 5857 } 5858 5859 unmap_cfgtable: 5860 iounmap(cfgtable); 5861 5862 unmap_vaddr: 5863 iounmap(vaddr); 5864 return rc; 5865 } 5866 5867 /* 5868 * We cannot read the structure directly, for portability we must use 5869 * the io functions. 5870 * This is for debug only. 5871
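* With HPSA_DEBUG undefined, the body below compiles away to nothing.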
5871 */ 5872 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb) 5873 { 5874 #ifdef HPSA_DEBUG 5875 int i; 5876 char temp_name[17]; 5877 5878 dev_info(dev, "Controller Configuration information\n"); 5879 dev_info(dev, "------------------------------------\n"); 5880 for (i = 0; i < 4; i++) 5881 temp_name[i] = readb(&(tb->Signature[i])); 5882 temp_name[4] = '\0'; 5883 dev_info(dev, " Signature = %s\n", temp_name); 5884 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence))); 5885 dev_info(dev, " Transport methods supported = 0x%x\n", 5886 readl(&(tb->TransportSupport))); 5887 dev_info(dev, " Transport methods active = 0x%x\n", 5888 readl(&(tb->TransportActive))); 5889 dev_info(dev, " Requested transport Method = 0x%x\n", 5890 readl(&(tb->HostWrite.TransportRequest))); 5891 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n", 5892 readl(&(tb->HostWrite.CoalIntDelay))); 5893 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n", 5894 readl(&(tb->HostWrite.CoalIntCount))); 5895 dev_info(dev, " Max outstanding commands = %d\n", 5896 readl(&(tb->CmdsOutMax))); 5897 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); 5898 for (i = 0; i < 16; i++) 5899 temp_name[i] = readb(&(tb->ServerName[i])); 5900 temp_name[16] = '\0'; 5901 dev_info(dev, " Server Name = %s\n", temp_name); 5902 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n", 5903 readl(&(tb->HeartBeat))); 5904 #endif /* HPSA_DEBUG */ 5905 } 5906 5907 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) 5908 { 5909 int i, offset, mem_type, bar_type; 5910 5911 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */ 5912 return 0; 5913 offset = 0; 5914 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 5915 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE; 5916 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO) 5917 offset += 4; 5918 else { 5919 mem_type = pci_resource_flags(pdev, i) & 5920 PCI_BASE_ADDRESS_MEM_TYPE_MASK; 5921 switch (mem_type) { 5922 case PCI_BASE_ADDRESS_MEM_TYPE_32: 5923 case PCI_BASE_ADDRESS_MEM_TYPE_1M: 5924 offset += 4; /* 32 bit */ 5925 break; 5926 case PCI_BASE_ADDRESS_MEM_TYPE_64: 5927 offset += 8; 5928 break; 5929 default: /* reserved in PCI 2.2 */ 5930 dev_warn(&pdev->dev, 5931 "base address is invalid\n"); 5932 return -1; 5933 break; 5934 } 5935 } 5936 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) 5937 return i + 1; 5938 } 5939 return -1; 5940 } 5941 5942 /* If MSI/MSI-X is supported by the kernel we will try to enable it on 5943 * controllers that are capable. If not, we use legacy INTx mode. 
5944 */ 5945 5946 static void hpsa_interrupt_mode(struct ctlr_info *h) 5947 { 5948 #ifdef CONFIG_PCI_MSI 5949 int err, i; 5950 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES]; 5951 5952 for (i = 0; i < MAX_REPLY_QUEUES; i++) { 5953 hpsa_msix_entries[i].vector = 0; 5954 hpsa_msix_entries[i].entry = i; 5955 } 5956 5957 /* Some boards advertise MSI but don't really support it */ 5958 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || 5959 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) 5960 goto default_int_mode; 5961 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { 5962 dev_info(&h->pdev->dev, "MSI-X capable controller\n"); 5963 h->msix_vector = MAX_REPLY_QUEUES; 5964 if (h->msix_vector > num_online_cpus()) 5965 h->msix_vector = num_online_cpus(); 5966 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries, 5967 1, h->msix_vector); 5968 if (err < 0) { 5969 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err); 5970 h->msix_vector = 0; 5971 goto single_msi_mode; 5972 } else if (err < h->msix_vector) { 5973 dev_warn(&h->pdev->dev, "only %d MSI-X vectors " 5974 "available\n", err); 5975 } 5976 h->msix_vector = err; 5977 for (i = 0; i < h->msix_vector; i++) 5978 h->intr[i] = hpsa_msix_entries[i].vector; 5979 return; 5980 } 5981 single_msi_mode: 5982 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { 5983 dev_info(&h->pdev->dev, "MSI capable controller\n"); 5984 if (!pci_enable_msi(h->pdev)) 5985 h->msi_vector = 1; 5986 else 5987 dev_warn(&h->pdev->dev, "MSI init failed\n"); 5988 } 5989 default_int_mode: 5990 #endif /* CONFIG_PCI_MSI */ 5991 /* if we get here we're going to use the default interrupt mode */ 5992 h->intr[h->intr_mode] = h->pdev->irq; 5993 } 5994 5995 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) 5996 { 5997 int i; 5998 u32 subsystem_vendor_id, subsystem_device_id; 5999 6000 subsystem_vendor_id = pdev->subsystem_vendor; 6001 subsystem_device_id = pdev->subsystem_device; 6002 *board_id = ((subsystem_device_id << 16) & 0xffff0000) | 6003 subsystem_vendor_id; 6004 6005 for (i = 0; i < ARRAY_SIZE(products); i++) 6006 if (*board_id == products[i].board_id) 6007 return i; 6008 6009 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP && 6010 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) || 6011 !hpsa_allow_any) { 6012 dev_warn(&pdev->dev, "unrecognized board ID: " 6013 "0x%08x, ignoring.\n", *board_id); 6014 return -ENODEV; 6015 } 6016 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */ 6017 } 6018 6019 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev, 6020 unsigned long *memory_bar) 6021 { 6022 int i; 6023 6024 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) 6025 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 6026 /* addressing mode bits already removed */ 6027 *memory_bar = pci_resource_start(pdev, i); 6028 dev_dbg(&pdev->dev, "memory BAR = %lx\n", 6029 *memory_bar); 6030 return 0; 6031 } 6032 dev_warn(&pdev->dev, "no memory BAR found\n"); 6033 return -ENODEV; 6034 } 6035 6036 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, 6037 int wait_for_ready) 6038 { 6039 int i, iterations; 6040 u32 scratchpad; 6041 if (wait_for_ready) 6042 iterations = HPSA_BOARD_READY_ITERATIONS; 6043 else 6044 iterations = HPSA_BOARD_NOT_READY_ITERATIONS; 6045 6046 for (i = 0; i < iterations; i++) { 6047 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); 6048 if (wait_for_ready) { 6049 if (scratchpad == HPSA_FIRMWARE_READY) 6050 return 0; 6051 } else { 6052 if (scratchpad != HPSA_FIRMWARE_READY) 6053 
return 0; 6054 } 6055 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS); 6056 } 6057 dev_warn(&pdev->dev, "board not ready, timed out.\n"); 6058 return -ENODEV; 6059 } 6060 6061 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, 6062 u32 *cfg_base_addr, u64 *cfg_base_addr_index, 6063 u64 *cfg_offset) 6064 { 6065 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET); 6066 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET); 6067 *cfg_base_addr &= (u32) 0x0000ffff; 6068 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr); 6069 if (*cfg_base_addr_index == -1) { 6070 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n"); 6071 return -ENODEV; 6072 } 6073 return 0; 6074 } 6075 6076 static int hpsa_find_cfgtables(struct ctlr_info *h) 6077 { 6078 u64 cfg_offset; 6079 u32 cfg_base_addr; 6080 u64 cfg_base_addr_index; 6081 u32 trans_offset; 6082 int rc; 6083 6084 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, 6085 &cfg_base_addr_index, &cfg_offset); 6086 if (rc) 6087 return rc; 6088 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, 6089 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); 6090 if (!h->cfgtable) { 6091 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n"); 6092 return -ENOMEM; 6093 } 6094 rc = write_driver_ver_to_cfgtable(h->cfgtable); 6095 if (rc) 6096 return rc; 6097 /* Find performant mode table. */ 6098 trans_offset = readl(&h->cfgtable->TransMethodOffset); 6099 h->transtable = remap_pci_mem(pci_resource_start(h->pdev, 6100 cfg_base_addr_index)+cfg_offset+trans_offset, 6101 sizeof(*h->transtable)); 6102 if (!h->transtable) 6103 return -ENOMEM; 6104 return 0; 6105 } 6106 6107 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) 6108 { 6109 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); 6110 6111 /* Limit commands in memory limited kdump scenario. */ 6112 if (reset_devices && h->max_commands > 32) 6113 h->max_commands = 32; 6114 6115 if (h->max_commands < 16) { 6116 dev_warn(&h->pdev->dev, "Controller reports " 6117 "max supported commands of %d, an obvious lie. " 6118 "Using 16. Ensure that firmware is up to date.\n", 6119 h->max_commands); 6120 h->max_commands = 16; 6121 } 6122 } 6123 6124 /* If the controller reports that the total max sg entries is greater than 512, 6125 * then we know that chained SG blocks work. (Original smart arrays did not 6126 * support chained SG blocks and would return zero for max sg entries.) 6127 */ 6128 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h) 6129 { 6130 return h->maxsgentries > 512; 6131 } 6132 6133 /* Interrogate the hardware for some limits: 6134 * max commands, max SG elements without chaining, and with chaining, 6135 * SG chain block size, etc. 6136 */ 6137 static void hpsa_find_board_params(struct ctlr_info *h) 6138 { 6139 hpsa_get_max_perf_mode_cmds(h); 6140 h->nr_cmds = h->max_commands; 6141 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); 6142 h->fw_support = readl(&(h->cfgtable->misc_fw_support)); 6143 if (hpsa_supports_chained_sg_blocks(h)) { 6144 /* Limit in-command s/g elements to 32 to save dma'able memory.
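The rest of maxsgentries feeds the chained SG block; one embedded entry is reserved for the chain pointer.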
*/ 6145 h->max_cmd_sg_entries = 32; 6146 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries; 6147 h->maxsgentries--; /* save one for chain pointer */ 6148 } else { 6149 /* 6150 * Original smart arrays supported at most 31 s/g entries 6151 * embedded inline in the command (trying to use more 6152 * would lock up the controller) 6153 */ 6154 h->max_cmd_sg_entries = 31; 6155 h->maxsgentries = 31; /* default to traditional values */ 6156 h->chainsize = 0; 6157 } 6158 6159 /* Find out what task management functions are supported and cache */ 6160 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags)); 6161 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags)) 6162 dev_warn(&h->pdev->dev, "Physical aborts not supported\n"); 6163 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) 6164 dev_warn(&h->pdev->dev, "Logical aborts not supported\n"); 6165 } 6166 6167 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) 6168 { 6169 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) { 6170 dev_err(&h->pdev->dev, "not a valid CISS config table\n"); 6171 return false; 6172 } 6173 return true; 6174 } 6175 6176 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h) 6177 { 6178 u32 driver_support; 6179 6180 driver_support = readl(&(h->cfgtable->driver_support)); 6181 /* Need to enable prefetch in the SCSI core for 6400 in x86 */ 6182 #ifdef CONFIG_X86 6183 driver_support |= ENABLE_SCSI_PREFETCH; 6184 #endif 6185 driver_support |= ENABLE_UNIT_ATTN; 6186 writel(driver_support, &(h->cfgtable->driver_support)); 6187 } 6188 6189 /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result 6190 * in a prefetch beyond physical memory. 6191 */ 6192 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h) 6193 { 6194 u32 dma_prefetch; 6195 6196 if (h->board_id != 0x3225103C) 6197 return; 6198 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); 6199 dma_prefetch |= 0x8000; 6200 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); 6201 } 6202 6203 static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h) 6204 { 6205 int i; 6206 u32 doorbell_value; 6207 unsigned long flags; 6208 /* wait until the clear_event_notify bit 6 is cleared by controller. */ 6209 for (i = 0; i < MAX_CONFIG_WAIT; i++) { 6210 spin_lock_irqsave(&h->lock, flags); 6211 doorbell_value = readl(h->vaddr + SA5_DOORBELL); 6212 spin_unlock_irqrestore(&h->lock, flags); 6213 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS)) 6214 break; 6215 /* delay and try again */ 6216 msleep(20); 6217 } 6218 } 6219 6220 static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h) 6221 { 6222 int i; 6223 u32 doorbell_value; 6224 unsigned long flags; 6225 6226 /* under certain very rare conditions, this can take a while. 6227 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right 6228 * as we enter this code.) 6229
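* The wait is bounded at MAX_CONFIG_WAIT polls of 10-20ms each.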
6229 */ 6230 for (i = 0; i < MAX_CONFIG_WAIT; i++) { 6231 spin_lock_irqsave(&h->lock, flags); 6232 doorbell_value = readl(h->vaddr + SA5_DOORBELL); 6233 spin_unlock_irqrestore(&h->lock, flags); 6234 if (!(doorbell_value & CFGTBL_ChangeReq)) 6235 break; 6236 /* delay and try again */ 6237 usleep_range(10000, 20000); 6238 } 6239 } 6240 6241 static int hpsa_enter_simple_mode(struct ctlr_info *h) 6242 { 6243 u32 trans_support; 6244 6245 trans_support = readl(&(h->cfgtable->TransportSupport)); 6246 if (!(trans_support & SIMPLE_MODE)) 6247 return -ENOTSUPP; 6248 6249 h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); 6250 6251 /* Update the field, and then ring the doorbell */ 6252 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); 6253 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); 6254 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 6255 hpsa_wait_for_mode_change_ack(h); 6256 print_cfg_table(&h->pdev->dev, h->cfgtable); 6257 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) 6258 goto error; 6259 h->transMethod = CFGTBL_Trans_Simple; 6260 return 0; 6261 error: 6262 dev_err(&h->pdev->dev, "failed to enter simple mode\n"); 6263 return -ENODEV; 6264 } 6265 6266 static int hpsa_pci_init(struct ctlr_info *h) 6267 { 6268 int prod_index, err; 6269 6270 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); 6271 if (prod_index < 0) 6272 return prod_index; 6273 h->product_name = products[prod_index].product_name; 6274 h->access = *(products[prod_index].access); 6275 6276 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | 6277 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); 6278 6279 err = pci_enable_device(h->pdev); 6280 if (err) { 6281 dev_warn(&h->pdev->dev, "unable to enable PCI device\n"); 6282 return err; 6283 } 6284 6285 err = pci_request_regions(h->pdev, HPSA); 6286 if (err) { 6287 dev_err(&h->pdev->dev, 6288 "cannot obtain PCI resources, aborting\n"); 6289 return err; 6290 } 6291 6292 pci_set_master(h->pdev); 6293 6294 hpsa_interrupt_mode(h); 6295 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); 6296 if (err) 6297 goto err_out_free_res; 6298 h->vaddr = remap_pci_mem(h->paddr, 0x250); 6299 if (!h->vaddr) { 6300 err = -ENOMEM; 6301 goto err_out_free_res; 6302 } 6303 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); 6304 if (err) 6305 goto err_out_free_res; 6306 err = hpsa_find_cfgtables(h); 6307 if (err) 6308 goto err_out_free_res; 6309 hpsa_find_board_params(h); 6310 6311 if (!hpsa_CISS_signature_present(h)) { 6312 err = -ENODEV; 6313 goto err_out_free_res; 6314 } 6315 hpsa_set_driver_support_bits(h); 6316 hpsa_p600_dma_prefetch_quirk(h); 6317 err = hpsa_enter_simple_mode(h); 6318 if (err) 6319 goto err_out_free_res; 6320 return 0; 6321 6322 err_out_free_res: 6323 if (h->transtable) 6324 iounmap(h->transtable); 6325 if (h->cfgtable) 6326 iounmap(h->cfgtable); 6327 if (h->vaddr) 6328 iounmap(h->vaddr); 6329 pci_disable_device(h->pdev); 6330 pci_release_regions(h->pdev); 6331 return err; 6332 } 6333 6334 static void hpsa_hba_inquiry(struct ctlr_info *h) 6335 { 6336 int rc; 6337 6338 #define HBA_INQUIRY_BYTE_COUNT 64 6339 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL); 6340 if (!h->hba_inquiry_data) 6341 return; 6342 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0, 6343 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT); 6344 if (rc != 0) { 6345 kfree(h->hba_inquiry_data); 6346 h->hba_inquiry_data = NULL; 6347 } 6348 } 6349 6350 static int hpsa_init_reset_devices(struct pci_dev *pdev) 6351 { 6352 int rc, i; 
void __iomem *vaddr; 6354 6355 if (!reset_devices) 6356 return 0; 6357 6358 /* The kdump kernel is loading and we don't know what state 6359 * the PCI interface is in. dev->enable_cnt is zero, so we 6360 * call enable+disable, wait a while, and switch it on. 6361 */ 6362 rc = pci_enable_device(pdev); 6363 if (rc) { 6364 dev_warn(&pdev->dev, "Failed to enable PCI device\n"); 6365 return -ENODEV; 6366 } 6367 pci_disable_device(pdev); 6368 msleep(260); /* a randomly chosen number */ 6369 rc = pci_enable_device(pdev); 6370 if (rc) { 6371 dev_warn(&pdev->dev, "failed to enable device.\n"); 6372 return -ENODEV; 6373 } 6374 6375 pci_set_master(pdev); 6376 6377 vaddr = pci_ioremap_bar(pdev, 0); 6378 if (vaddr == NULL) { 6379 rc = -ENOMEM; 6380 goto out_disable; 6381 } 6382 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET); 6383 iounmap(vaddr); 6384 6385 /* Reset the controller with a PCI power-cycle or via doorbell */ 6386 rc = hpsa_kdump_hard_reset_controller(pdev); 6387 6388 /* -ENOTSUPP here means we cannot reset the controller 6389 * but it's already (and still) up and running in 6390 * "performant mode". Or, it might be 640x, which can't reset 6391 * due to concerns about shared bbwc between 6402/6404 pair. 6392 */ 6393 if (rc) 6394 goto out_disable; 6395 6396 /* Now try to get the controller to respond to a no-op */ 6397 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n"); 6398 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { 6399 if (hpsa_noop(pdev) == 0) 6400 break; 6401 else 6402 dev_warn(&pdev->dev, "no-op failed%s\n", 6403 (i < 11 ? "; re-trying" : "")); 6404 } 6405 6406 out_disable: 6407 6408 pci_disable_device(pdev); 6409 return rc; 6410 } 6411 6412 static int hpsa_allocate_cmd_pool(struct ctlr_info *h) 6413 { 6414 h->cmd_pool_bits = kzalloc( 6415 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) * 6416 sizeof(unsigned long), GFP_KERNEL); 6417 h->cmd_pool = pci_alloc_consistent(h->pdev, 6418 h->nr_cmds * sizeof(*h->cmd_pool), 6419 &(h->cmd_pool_dhandle)); 6420 h->errinfo_pool = pci_alloc_consistent(h->pdev, 6421 h->nr_cmds * sizeof(*h->errinfo_pool), 6422 &(h->errinfo_pool_dhandle)); 6423 if ((h->cmd_pool_bits == NULL) 6424 || (h->cmd_pool == NULL) 6425 || (h->errinfo_pool == NULL)) { 6426 dev_err(&h->pdev->dev, "out of memory in %s", __func__); 6427 goto clean_up; 6428 } 6429 return 0; 6430 clean_up: 6431 hpsa_free_cmd_pool(h); 6432 return -ENOMEM; 6433 } 6434 6435 static void hpsa_free_cmd_pool(struct ctlr_info *h) 6436 { 6437 kfree(h->cmd_pool_bits); 6438 if (h->cmd_pool) 6439 pci_free_consistent(h->pdev, 6440 h->nr_cmds * sizeof(struct CommandList), 6441 h->cmd_pool, h->cmd_pool_dhandle); 6442 if (h->ioaccel2_cmd_pool) 6443 pci_free_consistent(h->pdev, 6444 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), 6445 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle); 6446 if (h->errinfo_pool) 6447 pci_free_consistent(h->pdev, 6448 h->nr_cmds * sizeof(struct ErrorInfo), 6449 h->errinfo_pool, 6450 h->errinfo_pool_dhandle); 6451 if (h->ioaccel_cmd_pool) 6452 pci_free_consistent(h->pdev, 6453 h->nr_cmds * sizeof(struct io_accel1_cmd), 6454 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); 6455 } 6456 6457 static void hpsa_irq_affinity_hints(struct ctlr_info *h) 6458 { 6459 int i, cpu; 6460 6461 cpu = cpumask_first(cpu_online_mask); 6462 for (i = 0; i < h->msix_vector; i++) { 6463 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu)); 6464 cpu = cpumask_next(cpu, cpu_online_mask); 6465 } 6466 } 6467 6468 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors
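(one irq per reply queue in MSI-X mode; otherwise just h->intr[h->intr_mode])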
*/ 6469 static void hpsa_free_irqs(struct ctlr_info *h) 6470 { 6471 int i; 6472 6473 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) { 6474 /* Single reply queue, only one irq to free */ 6475 i = h->intr_mode; 6476 irq_set_affinity_hint(h->intr[i], NULL); 6477 free_irq(h->intr[i], &h->q[i]); 6478 return; 6479 } 6480 6481 for (i = 0; i < h->msix_vector; i++) { 6482 irq_set_affinity_hint(h->intr[i], NULL); 6483 free_irq(h->intr[i], &h->q[i]); 6484 } 6485 for (; i < MAX_REPLY_QUEUES; i++) 6486 h->q[i] = 0; 6487 } 6488 6489 /* returns 0 on success; cleans up and returns -Enn on error */ 6490 static int hpsa_request_irqs(struct ctlr_info *h, 6491 irqreturn_t (*msixhandler)(int, void *), 6492 irqreturn_t (*intxhandler)(int, void *)) 6493 { 6494 int rc, i; 6495 6496 /* 6497 * initialize h->q[x] = x so that interrupt handlers know which 6498 * queue to process. 6499 */ 6500 for (i = 0; i < MAX_REPLY_QUEUES; i++) 6501 h->q[i] = (u8) i; 6502 6503 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) { 6504 /* If performant mode and MSI-X, use multiple reply queues */ 6505 for (i = 0; i < h->msix_vector; i++) { 6506 rc = request_irq(h->intr[i], msixhandler, 6507 0, h->devname, 6508 &h->q[i]); 6509 if (rc) { 6510 int j; 6511 6512 dev_err(&h->pdev->dev, 6513 "failed to get irq %d for %s\n", 6514 h->intr[i], h->devname); 6515 for (j = 0; j < i; j++) { 6516 free_irq(h->intr[j], &h->q[j]); 6517 h->q[j] = 0; 6518 } 6519 for (; j < MAX_REPLY_QUEUES; j++) 6520 h->q[j] = 0; 6521 return rc; 6522 } 6523 } 6524 hpsa_irq_affinity_hints(h); 6525 } else { 6526 /* Use single reply pool */ 6527 if (h->msix_vector > 0 || h->msi_vector) { 6528 rc = request_irq(h->intr[h->intr_mode], 6529 msixhandler, 0, h->devname, 6530 &h->q[h->intr_mode]); 6531 } else { 6532 rc = request_irq(h->intr[h->intr_mode], 6533 intxhandler, IRQF_SHARED, h->devname, 6534 &h->q[h->intr_mode]); 6535 } 6536 } 6537 if (rc) { 6538 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n", 6539 h->intr[h->intr_mode], h->devname); 6540 return -ENODEV; 6541 } 6542 return 0; 6543 } 6544 6545 static int hpsa_kdump_soft_reset(struct ctlr_info *h) 6546 { 6547 if (hpsa_send_host_reset(h, RAID_CTLR_LUNID, 6548 HPSA_RESET_TYPE_CONTROLLER)) { 6549 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n"); 6550 return -EIO; 6551 } 6552 6553 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); 6554 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) { 6555 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); 6556 return -1; 6557 } 6558 6559 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); 6560 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) { 6561 dev_warn(&h->pdev->dev, "Board failed to become ready " 6562 "after soft reset.\n"); 6563 return -1; 6564 } 6565 6566 return 0; 6567 } 6568 6569 static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h) 6570 { 6571 hpsa_free_irqs(h); 6572 #ifdef CONFIG_PCI_MSI 6573 if (h->msix_vector) { 6574 if (h->pdev->msix_enabled) 6575 pci_disable_msix(h->pdev); 6576 } else if (h->msi_vector) { 6577 if (h->pdev->msi_enabled) 6578 pci_disable_msi(h->pdev); 6579 } 6580 #endif /* CONFIG_PCI_MSI */ 6581 } 6582 6583 static void hpsa_free_reply_queues(struct ctlr_info *h) 6584 { 6585 int i; 6586 6587 for (i = 0; i < h->nreply_queues; i++) { 6588 if (!h->reply_queue[i].head) 6589 continue; 6590 pci_free_consistent(h->pdev, h->reply_queue_size, 6591 h->reply_queue[i].head, h->reply_queue[i].busaddr); 6592 h->reply_queue[i].head = NULL; 6593 
h->reply_queue[i].busaddr = 0; 6594 } 6595 } 6596 6597 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) 6598 { 6599 hpsa_free_irqs_and_disable_msix(h); 6600 hpsa_free_sg_chain_blocks(h); 6601 hpsa_free_cmd_pool(h); 6602 kfree(h->ioaccel1_blockFetchTable); 6603 kfree(h->blockFetchTable); 6604 hpsa_free_reply_queues(h); 6605 if (h->vaddr) 6606 iounmap(h->vaddr); 6607 if (h->transtable) 6608 iounmap(h->transtable); 6609 if (h->cfgtable) 6610 iounmap(h->cfgtable); 6611 pci_disable_device(h->pdev); 6612 pci_release_regions(h->pdev); 6613 kfree(h); 6614 } 6615 6616 /* Called when controller lockup detected. */ 6617 static void fail_all_outstanding_cmds(struct ctlr_info *h) 6618 { 6619 int i, refcount; 6620 struct CommandList *c; 6621 6622 flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */ 6623 for (i = 0; i < h->nr_cmds; i++) { 6624 c = h->cmd_pool + i; 6625 refcount = atomic_inc_return(&c->refcount); 6626 if (refcount > 1) { 6627 c->err_info->CommandStatus = CMD_HARDWARE_ERR; 6628 finish_cmd(c); 6629 } 6630 cmd_free(h, c); 6631 } 6632 } 6633 6634 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value) 6635 { 6636 int i, cpu; 6637 6638 cpu = cpumask_first(cpu_online_mask); 6639 for (i = 0; i < num_online_cpus(); i++) { 6640 u32 *lockup_detected; 6641 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); 6642 *lockup_detected = value; 6643 cpu = cpumask_next(cpu, cpu_online_mask); 6644 } 6645 wmb(); /* be sure the per-cpu variables are out to memory */ 6646 } 6647 6648 static void controller_lockup_detected(struct ctlr_info *h) 6649 { 6650 unsigned long flags; 6651 u32 lockup_detected; 6652 6653 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6654 spin_lock_irqsave(&h->lock, flags); 6655 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); 6656 if (!lockup_detected) { 6657 /* no heartbeat, but controller gave us a zero. */ 6658 dev_warn(&h->pdev->dev, 6659 "lockup detected but scratchpad register is zero\n"); 6660 lockup_detected = 0xffffffff; 6661 } 6662 set_lockup_detected_for_all_cpus(h, lockup_detected); 6663 spin_unlock_irqrestore(&h->lock, flags); 6664 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n", 6665 lockup_detected); 6666 pci_disable_device(h->pdev); 6667 fail_all_outstanding_cmds(h); 6668 } 6669 6670 static void detect_controller_lockup(struct ctlr_info *h) 6671 { 6672 u64 now; 6673 u32 heartbeat; 6674 unsigned long flags; 6675 6676 now = get_jiffies_64(); 6677 /* If we've received an interrupt recently, we're ok. */ 6678 if (time_after64(h->last_intr_timestamp + 6679 (h->heartbeat_sample_interval), now)) 6680 return; 6681 6682 /* 6683 * If we've already checked the heartbeat recently, we're ok. 6684 * This could happen if someone sends us a signal. We 6685 * otherwise don't care about signals in this thread. 6686 */ 6687 if (time_after64(h->last_heartbeat_timestamp + 6688 (h->heartbeat_sample_interval), now)) 6689 return; 6690 6691 /* If heartbeat has not changed since we last looked, we're not ok. */ 6692 spin_lock_irqsave(&h->lock, flags); 6693 heartbeat = readl(&h->cfgtable->HeartBeat); 6694 spin_unlock_irqrestore(&h->lock, flags); 6695 if (h->last_heartbeat == heartbeat) { 6696 controller_lockup_detected(h); 6697 return; 6698 } 6699 6700 /* We're ok. 
*/ 6701 h->last_heartbeat = heartbeat; 6702 h->last_heartbeat_timestamp = now; 6703 } 6704 6705 static void hpsa_ack_ctlr_events(struct ctlr_info *h) 6706 { 6707 int i; 6708 char *event_type; 6709 6710 /* Ask the controller to clear the events we're handling. */ 6711 if ((h->transMethod & (CFGTBL_Trans_io_accel1 6712 | CFGTBL_Trans_io_accel2)) && 6713 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE || 6714 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) { 6715 6716 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE) 6717 event_type = "state change"; 6718 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE) 6719 event_type = "configuration change"; 6720 /* Stop sending new RAID offload reqs via the IO accelerator */ 6721 scsi_block_requests(h->scsi_host); 6722 for (i = 0; i < h->ndevices; i++) 6723 h->dev[i]->offload_enabled = 0; 6724 hpsa_drain_accel_commands(h); 6725 /* Set 'accelerator path config change' bit */ 6726 dev_warn(&h->pdev->dev, 6727 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n", 6728 h->events, event_type); 6729 writel(h->events, &(h->cfgtable->clear_event_notify)); 6730 /* Set the "clear event notify field update" bit 6 */ 6731 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); 6732 /* Wait until ctlr clears 'clear event notify field', bit 6 */ 6733 hpsa_wait_for_clear_event_notify_ack(h); 6734 scsi_unblock_requests(h->scsi_host); 6735 } else { 6736 /* Acknowledge controller notification events. */ 6737 writel(h->events, &(h->cfgtable->clear_event_notify)); 6738 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); 6739 hpsa_wait_for_clear_event_notify_ack(h); 6740 #if 0 6741 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 6742 hpsa_wait_for_mode_change_ack(h); 6743 #endif 6744 } 6745 return; 6746 } 6747 6748 /* Check a register on the controller to see if there are configuration 6749 * changes (added/changed/removed logical drives, etc.) which mean that 6750 * we should rescan the controller for devices. 6751 * Also check flag for driver-initiated rescan. 
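* A nonzero return means event_notify reported one of the RESCAN_REQUIRED_EVENT_BITS.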
6752 */ 6753 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h) 6754 { 6755 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) 6756 return 0; 6757 6758 h->events = readl(&(h->cfgtable->event_notify)); 6759 return h->events & RESCAN_REQUIRED_EVENT_BITS; 6760 } 6761 6762 /* 6763 * Check if any of the offline devices have become ready 6764 */ 6765 static int hpsa_offline_devices_ready(struct ctlr_info *h) 6766 { 6767 unsigned long flags; 6768 struct offline_device_entry *d; 6769 struct list_head *this, *tmp; 6770 6771 spin_lock_irqsave(&h->offline_device_lock, flags); 6772 list_for_each_safe(this, tmp, &h->offline_device_list) { 6773 d = list_entry(this, struct offline_device_entry, 6774 offline_list); 6775 spin_unlock_irqrestore(&h->offline_device_lock, flags); 6776 if (!hpsa_volume_offline(h, d->scsi3addr)) { 6777 spin_lock_irqsave(&h->offline_device_lock, flags); 6778 list_del(&d->offline_list); 6779 spin_unlock_irqrestore(&h->offline_device_lock, flags); 6780 return 1; 6781 } 6782 spin_lock_irqsave(&h->offline_device_lock, flags); 6783 } 6784 spin_unlock_irqrestore(&h->offline_device_lock, flags); 6785 return 0; 6786 } 6787 6788 6789 static void hpsa_monitor_ctlr_worker(struct work_struct *work) 6790 { 6791 unsigned long flags; 6792 struct ctlr_info *h = container_of(to_delayed_work(work), 6793 struct ctlr_info, monitor_ctlr_work); 6794 detect_controller_lockup(h); 6795 if (lockup_detected(h)) 6796 return; 6797 6798 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { 6799 scsi_host_get(h->scsi_host); 6800 hpsa_ack_ctlr_events(h); 6801 hpsa_scan_start(h->scsi_host); 6802 scsi_host_put(h->scsi_host); 6803 } 6804 6805 spin_lock_irqsave(&h->lock, flags); 6806 if (h->remove_in_progress) { 6807 spin_unlock_irqrestore(&h->lock, flags); 6808 return; 6809 } 6810 schedule_delayed_work(&h->monitor_ctlr_work, 6811 h->heartbeat_sample_interval); 6812 spin_unlock_irqrestore(&h->lock, flags); 6813 } 6814 6815 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 6816 { 6817 int dac, rc; 6818 struct ctlr_info *h; 6819 int try_soft_reset = 0; 6820 unsigned long flags; 6821 6822 if (number_of_controllers == 0) 6823 printk(KERN_INFO DRIVER_NAME "\n"); 6824 6825 rc = hpsa_init_reset_devices(pdev); 6826 if (rc) { 6827 if (rc != -ENOTSUPP) 6828 return rc; 6829 /* If the reset fails in a particular way (it has no way to do 6830 * a proper hard reset, so returns -ENOTSUPP) we can try to do 6831 * a soft reset once we get the controller configured up to the 6832 * point that it can accept a command. 6833 */ 6834 try_soft_reset = 1; 6835 rc = 0; 6836 } 6837 6838 reinit_after_soft_reset: 6839 6840 /* Command structures must be aligned on a 32-byte boundary because 6841 * the 5 lower bits of the address are used by the hardware. and by 6842 * the driver. See comments in hpsa.h for more info. 6843 */ 6844 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); 6845 h = kzalloc(sizeof(*h), GFP_KERNEL); 6846 if (!h) 6847 return -ENOMEM; 6848 6849 h->pdev = pdev; 6850 h->intr_mode = hpsa_simple_mode ? 
SIMPLE_MODE_INT : PERF_MODE_INT; 6851 INIT_LIST_HEAD(&h->offline_device_list); 6852 spin_lock_init(&h->lock); 6853 spin_lock_init(&h->offline_device_lock); 6854 spin_lock_init(&h->scan_lock); 6855 spin_lock_init(&h->passthru_count_lock); 6856 6857 h->resubmit_wq = alloc_workqueue("hpsa", WQ_MEM_RECLAIM, 0); 6858 if (!h->resubmit_wq) { 6859 dev_err(&h->pdev->dev, "Failed to allocate work queue\n"); 6860 rc = -ENOMEM; 6861 goto clean1; 6862 } 6863 /* Allocate and clear per-cpu variable lockup_detected */ 6864 h->lockup_detected = alloc_percpu(u32); 6865 if (!h->lockup_detected) { 6866 rc = -ENOMEM; 6867 goto clean1; 6868 } 6869 set_lockup_detected_for_all_cpus(h, 0); 6870 6871 rc = hpsa_pci_init(h); 6872 if (rc != 0) 6873 goto clean1; 6874 6875 sprintf(h->devname, HPSA "%d", number_of_controllers); 6876 h->ctlr = number_of_controllers; 6877 number_of_controllers++; 6878 6879 /* configure PCI DMA stuff */ 6880 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 6881 if (rc == 0) { 6882 dac = 1; 6883 } else { 6884 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 6885 if (rc == 0) { 6886 dac = 0; 6887 } else { 6888 dev_err(&pdev->dev, "no suitable DMA available\n"); 6889 goto clean1; 6890 } 6891 } 6892 6893 /* make sure the board interrupts are off */ 6894 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6895 6896 if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx)) 6897 goto clean2; 6898 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", 6899 h->devname, pdev->device, 6900 h->intr[h->intr_mode], dac ? "" : " not"); 6901 rc = hpsa_allocate_cmd_pool(h); 6902 if (rc) 6903 goto clean2_and_free_irqs; 6904 if (hpsa_allocate_sg_chain_blocks(h)) 6905 goto clean4; 6906 init_waitqueue_head(&h->scan_wait_queue); 6907 h->scan_finished = 1; /* no scan currently in progress */ 6908 6909 pci_set_drvdata(pdev, h); 6910 h->ndevices = 0; 6911 h->hba_mode_enabled = 0; 6912 h->scsi_host = NULL; 6913 spin_lock_init(&h->devlock); 6914 hpsa_put_ctlr_into_performant_mode(h); 6915 6916 /* At this point, the controller is ready to take commands. 6917 * Now, if reset_devices and the hard reset didn't work, try 6918 * the soft reset and see if that works. 6919 */ 6920 if (try_soft_reset) { 6921 6922 /* This is kind of gross. We may or may not get a completion 6923 * from the soft reset command, and if we do, then the value 6924 * from the fifo may or may not be valid. So, we wait 10 secs 6925 * after the reset throwing away any completions we get during 6926 * that time. Unregister the interrupt handler and register 6927 * fake ones to scoop up any residual completions. 6928 */ 6929 spin_lock_irqsave(&h->lock, flags); 6930 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6931 spin_unlock_irqrestore(&h->lock, flags); 6932 hpsa_free_irqs(h); 6933 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions, 6934 hpsa_intx_discard_completions); 6935 if (rc) { 6936 dev_warn(&h->pdev->dev, 6937 "Failed to request_irq after soft reset.\n"); 6938 goto clean4; 6939 } 6940 6941 rc = hpsa_kdump_soft_reset(h); 6942 if (rc) 6943 /* Neither hard nor soft reset worked, we're hosed. 
*/ 6944 goto clean4; 6945 6946 dev_info(&h->pdev->dev, "Board READY.\n"); 6947 dev_info(&h->pdev->dev, 6948 "Waiting for stale completions to drain.\n"); 6949 h->access.set_intr_mask(h, HPSA_INTR_ON); 6950 msleep(10000); 6951 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6952 6953 rc = controller_reset_failed(h->cfgtable); 6954 if (rc) 6955 dev_info(&h->pdev->dev, 6956 "Soft reset appears to have failed.\n"); 6957 6958 /* since the controller's reset, we have to go back and re-init 6959 * everything. Easiest to just forget what we've done and do it 6960 * all over again. 6961 */ 6962 hpsa_undo_allocations_after_kdump_soft_reset(h); 6963 try_soft_reset = 0; 6964 if (rc) 6965 /* don't go to clean4, we already unallocated */ 6966 return -ENODEV; 6967 6968 goto reinit_after_soft_reset; 6969 } 6970 6971 /* Enable Accelerated IO path at driver layer */ 6972 h->acciopath_status = 1; 6973 6974 6975 /* Turn the interrupts on so we can service requests */ 6976 h->access.set_intr_mask(h, HPSA_INTR_ON); 6977 6978 hpsa_hba_inquiry(h); 6979 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ 6980 6981 /* Monitor the controller for firmware lockups */ 6982 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; 6983 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); 6984 schedule_delayed_work(&h->monitor_ctlr_work, 6985 h->heartbeat_sample_interval); 6986 return 0; 6987 6988 clean4: 6989 hpsa_free_sg_chain_blocks(h); 6990 hpsa_free_cmd_pool(h); 6991 clean2_and_free_irqs: 6992 hpsa_free_irqs(h); 6993 clean2: 6994 clean1: 6995 if (h->resubmit_wq) 6996 destroy_workqueue(h->resubmit_wq); 6997 if (h->lockup_detected) 6998 free_percpu(h->lockup_detected); 6999 kfree(h); 7000 return rc; 7001 } 7002 7003 static void hpsa_flush_cache(struct ctlr_info *h) 7004 { 7005 char *flush_buf; 7006 struct CommandList *c; 7007 7008 /* Don't bother trying to flush the cache if locked up */ 7009 if (unlikely(lockup_detected(h))) 7010 return; 7011 flush_buf = kzalloc(4, GFP_KERNEL); 7012 if (!flush_buf) 7013 return; 7014 7015 c = cmd_alloc(h); 7016 if (!c) { 7017 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); 7018 goto out_of_memory; 7019 } 7020 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, 7021 RAID_CTLR_LUNID, TYPE_CMD)) { 7022 goto out; 7023 } 7024 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE); 7025 if (c->err_info->CommandStatus != 0) 7026 out: 7027 dev_warn(&h->pdev->dev, 7028 "error flushing cache on controller\n"); 7029 cmd_free(h, c); 7030 out_of_memory: 7031 kfree(flush_buf); 7032 } 7033 7034 static void hpsa_shutdown(struct pci_dev *pdev) 7035 { 7036 struct ctlr_info *h; 7037 7038 h = pci_get_drvdata(pdev); 7039 /* Turn board interrupts off and send the flush cache command 7040 * sendcmd will turn off interrupt, and send the flush... 
7041 * To write all data in the battery backed cache to disks 7042 */ 7043 hpsa_flush_cache(h); 7044 h->access.set_intr_mask(h, HPSA_INTR_OFF); 7045 hpsa_free_irqs_and_disable_msix(h); 7046 } 7047 7048 static void hpsa_free_device_info(struct ctlr_info *h) 7049 { 7050 int i; 7051 7052 for (i = 0; i < h->ndevices; i++) 7053 kfree(h->dev[i]); 7054 } 7055 7056 static void hpsa_remove_one(struct pci_dev *pdev) 7057 { 7058 struct ctlr_info *h; 7059 unsigned long flags; 7060 7061 if (pci_get_drvdata(pdev) == NULL) { 7062 dev_err(&pdev->dev, "unable to remove device\n"); 7063 return; 7064 } 7065 h = pci_get_drvdata(pdev); 7066 7067 /* Get rid of any controller monitoring work items */ 7068 spin_lock_irqsave(&h->lock, flags); 7069 h->remove_in_progress = 1; 7070 cancel_delayed_work(&h->monitor_ctlr_work); 7071 spin_unlock_irqrestore(&h->lock, flags); 7072 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ 7073 hpsa_shutdown(pdev); 7074 destroy_workqueue(h->resubmit_wq); 7075 iounmap(h->vaddr); 7076 iounmap(h->transtable); 7077 iounmap(h->cfgtable); 7078 hpsa_free_device_info(h); 7079 hpsa_free_sg_chain_blocks(h); 7080 pci_free_consistent(h->pdev, 7081 h->nr_cmds * sizeof(struct CommandList), 7082 h->cmd_pool, h->cmd_pool_dhandle); 7083 pci_free_consistent(h->pdev, 7084 h->nr_cmds * sizeof(struct ErrorInfo), 7085 h->errinfo_pool, h->errinfo_pool_dhandle); 7086 hpsa_free_reply_queues(h); 7087 kfree(h->cmd_pool_bits); 7088 kfree(h->blockFetchTable); 7089 kfree(h->ioaccel1_blockFetchTable); 7090 kfree(h->ioaccel2_blockFetchTable); 7091 kfree(h->hba_inquiry_data); 7092 pci_disable_device(pdev); 7093 pci_release_regions(pdev); 7094 free_percpu(h->lockup_detected); 7095 kfree(h); 7096 } 7097 7098 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev, 7099 __attribute__((unused)) pm_message_t state) 7100 { 7101 return -ENOSYS; 7102 } 7103 7104 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev) 7105 { 7106 return -ENOSYS; 7107 } 7108 7109 static struct pci_driver hpsa_pci_driver = { 7110 .name = HPSA, 7111 .probe = hpsa_init_one, 7112 .remove = hpsa_remove_one, 7113 .id_table = hpsa_pci_device_id, /* id_table */ 7114 .shutdown = hpsa_shutdown, 7115 .suspend = hpsa_suspend, 7116 .resume = hpsa_resume, 7117 }; 7118 7119 /* Fill in bucket_map[], given nsgs (the max number of 7120 * scatter gather elements supported) and bucket[], 7121 * which is an array of 8 integers. The bucket[] array 7122 * contains 8 different DMA transfer sizes (in 16 7123 * byte increments) which the controller uses to fetch 7124 * commands. This function fills in bucket_map[], which 7125 * maps a given number of scatter gather elements to one of 7126 * the 8 DMA transfer sizes. The point of it is to allow the 7127 * controller to only do as much DMA as needed to fetch the 7128 * command, with the DMA transfer size encoded in the lower 7129 * bits of the command address. 7130 */ 7131 static void calc_bucket_map(int bucket[], int num_buckets, 7132 int nsgs, int min_blocks, u32 *bucket_map) 7133 { 7134 int i, j, b, size; 7135 7136 /* Note, bucket_map must have nsgs+1 entries. */ 7137 for (i = 0; i <= nsgs; i++) { 7138 /* Compute size of a command with i SG entries */ 7139 size = i + min_blocks; 7140 b = num_buckets; /* Assume the biggest bucket */ 7141 /* Find the bucket that is just big enough */ 7142 for (j = 0; j < num_buckets; j++) { 7143 if (bucket[j] >= size) { 7144 b = j; 7145 break; 7146 } 7147 } 7148 /* for a command with i SG entries, use bucket b. 
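For example, with bucket[] = {5, 6, 8, 10, ...} and min_blocks = 4, i = 3 gives size = 7, which first fits bucket[2] = 8, so bucket_map[3] = 2.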
static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
	int i;
	unsigned long register_value;
	unsigned long transMethod = CFGTBL_Trans_Performant |
			(trans_support & CFGTBL_Trans_use_short_tags) |
				CFGTBL_Trans_enable_directed_msix |
			(trans_support & (CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_io_accel2));
	struct access_method access = SA5_performant_access;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to in order to tell it the 8
	 * different sizes of commands that there may be.  It's a way of
	 * reducing the DMA done to fetch each command.  Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within.  The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16-byte blocks.
	 * The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
	 * blocks.  Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements.  bft[] contains the eight values we write to
	 * the registers.  They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 * (A sketch of how the bucket index reaches the hardware follows
	 * this function.)
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
#define MIN_IOACCEL2_BFT_ENTRY 5
#define HPSA_IOACCEL2_HEADER_SZ 4
	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
			13, 14, 15, 16, 17, 18, 19,
			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
			16 * MIN_IOACCEL2_BFT_ENTRY);
	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/* 5 = 1 s/g entry or 4k
	 * 6 = 2 s/g entry or 8k
	 * 8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */

	/* If the controller supports either ioaccel method then
	 * we can also use the RAID stack submit path that does not
	 * perform the superfluous readl() after each command submission.
	 */
	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
		access = SA5_performant_access_no_read;

	/* Controller spec: zero out this buffer. */
	for (i = 0; i < h->nreply_queues; i++)
		memset(h->reply_queue[i].head, 0, h->reply_queue_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_queue[i].busaddr,
			&h->transtable->RepQAddr[i].lower);
	}

	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
	/* Enable outbound interrupt coalescing in accelerator mode. */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		access = SA5_ioaccel_mode1_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		access = SA5_ioaccel_mode2_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - transport not active\n");
		return;
	}
	/* Change the access methods to the performant access methods */
	h->access = access;
	h->transMethod = transMethod;

	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
		(trans_support & CFGTBL_Trans_io_accel2)))
		return;

	if (trans_support & CFGTBL_Trans_io_accel1) {
		/* Set up I/O accelerator mode */
		for (i = 0; i < h->nreply_queues; i++) {
			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
			h->reply_queue[i].current_entry =
				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
		}
		bft[7] = h->ioaccel_maxsg + 8;
		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
				h->ioaccel1_blockFetchTable);

		/* initialize all reply queue entries to unused */
		for (i = 0; i < h->nreply_queues; i++)
			memset(h->reply_queue[i].head,
				(u8) IOACCEL_MODE1_REPLY_UNUSED,
				h->reply_queue_size);

		/* set all the constant fields in the accelerator command
		 * frames once at init time to save CPU cycles later.
		 */
7279 */ 7280 for (i = 0; i < h->nr_cmds; i++) { 7281 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i]; 7282 7283 cp->function = IOACCEL1_FUNCTION_SCSIIO; 7284 cp->err_info = (u32) (h->errinfo_pool_dhandle + 7285 (i * sizeof(struct ErrorInfo))); 7286 cp->err_info_len = sizeof(struct ErrorInfo); 7287 cp->sgl_offset = IOACCEL1_SGLOFFSET; 7288 cp->host_context_flags = 7289 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT); 7290 cp->timeout_sec = 0; 7291 cp->ReplyQueue = 0; 7292 cp->tag = 7293 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT)); 7294 cp->host_addr = 7295 cpu_to_le64(h->ioaccel_cmd_pool_dhandle + 7296 (i * sizeof(struct io_accel1_cmd))); 7297 } 7298 } else if (trans_support & CFGTBL_Trans_io_accel2) { 7299 u64 cfg_offset, cfg_base_addr_index; 7300 u32 bft2_offset, cfg_base_addr; 7301 int rc; 7302 7303 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, 7304 &cfg_base_addr_index, &cfg_offset); 7305 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64); 7306 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ; 7307 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg, 7308 4, h->ioaccel2_blockFetchTable); 7309 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset); 7310 BUILD_BUG_ON(offsetof(struct CfgTable, 7311 io_accel_request_size_offset) != 0xb8); 7312 h->ioaccel2_bft2_regs = 7313 remap_pci_mem(pci_resource_start(h->pdev, 7314 cfg_base_addr_index) + 7315 cfg_offset + bft2_offset, 7316 ARRAY_SIZE(bft2) * 7317 sizeof(*h->ioaccel2_bft2_regs)); 7318 for (i = 0; i < ARRAY_SIZE(bft2); i++) 7319 writel(bft2[i], &h->ioaccel2_bft2_regs[i]); 7320 } 7321 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 7322 hpsa_wait_for_mode_change_ack(h); 7323 } 7324 7325 static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h) 7326 { 7327 h->ioaccel_maxsg = 7328 readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); 7329 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES) 7330 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES; 7331 7332 /* Command structures must be aligned on a 128-byte boundary 7333 * because the 7 lower bits of the address are used by the 7334 * hardware. 
7335 */ 7336 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) % 7337 IOACCEL1_COMMANDLIST_ALIGNMENT); 7338 h->ioaccel_cmd_pool = 7339 pci_alloc_consistent(h->pdev, 7340 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), 7341 &(h->ioaccel_cmd_pool_dhandle)); 7342 7343 h->ioaccel1_blockFetchTable = 7344 kmalloc(((h->ioaccel_maxsg + 1) * 7345 sizeof(u32)), GFP_KERNEL); 7346 7347 if ((h->ioaccel_cmd_pool == NULL) || 7348 (h->ioaccel1_blockFetchTable == NULL)) 7349 goto clean_up; 7350 7351 memset(h->ioaccel_cmd_pool, 0, 7352 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool)); 7353 return 0; 7354 7355 clean_up: 7356 if (h->ioaccel_cmd_pool) 7357 pci_free_consistent(h->pdev, 7358 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), 7359 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); 7360 kfree(h->ioaccel1_blockFetchTable); 7361 return 1; 7362 } 7363 7364 static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h) 7365 { 7366 /* Allocate ioaccel2 mode command blocks and block fetch table */ 7367 7368 h->ioaccel_maxsg = 7369 readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); 7370 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES) 7371 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES; 7372 7373 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) % 7374 IOACCEL2_COMMANDLIST_ALIGNMENT); 7375 h->ioaccel2_cmd_pool = 7376 pci_alloc_consistent(h->pdev, 7377 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), 7378 &(h->ioaccel2_cmd_pool_dhandle)); 7379 7380 h->ioaccel2_blockFetchTable = 7381 kmalloc(((h->ioaccel_maxsg + 1) * 7382 sizeof(u32)), GFP_KERNEL); 7383 7384 if ((h->ioaccel2_cmd_pool == NULL) || 7385 (h->ioaccel2_blockFetchTable == NULL)) 7386 goto clean_up; 7387 7388 memset(h->ioaccel2_cmd_pool, 0, 7389 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool)); 7390 return 0; 7391 7392 clean_up: 7393 if (h->ioaccel2_cmd_pool) 7394 pci_free_consistent(h->pdev, 7395 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), 7396 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle); 7397 kfree(h->ioaccel2_blockFetchTable); 7398 return 1; 7399 } 7400 7401 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) 7402 { 7403 u32 trans_support; 7404 unsigned long transMethod = CFGTBL_Trans_Performant | 7405 CFGTBL_Trans_use_short_tags; 7406 int i; 7407 7408 if (hpsa_simple_mode) 7409 return; 7410 7411 trans_support = readl(&(h->cfgtable->TransportSupport)); 7412 if (!(trans_support & PERFORMANT_MODE)) 7413 return; 7414 7415 /* Check for I/O accelerator mode support */ 7416 if (trans_support & CFGTBL_Trans_io_accel1) { 7417 transMethod |= CFGTBL_Trans_io_accel1 | 7418 CFGTBL_Trans_enable_directed_msix; 7419 if (hpsa_alloc_ioaccel_cmd_and_bft(h)) 7420 goto clean_up; 7421 } else { 7422 if (trans_support & CFGTBL_Trans_io_accel2) { 7423 transMethod |= CFGTBL_Trans_io_accel2 | 7424 CFGTBL_Trans_enable_directed_msix; 7425 if (ioaccel2_alloc_cmds_and_bft(h)) 7426 goto clean_up; 7427 } 7428 } 7429 7430 h->nreply_queues = h->msix_vector > 0 ? 
static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
{
	/* Allocate ioaccel2 mode command blocks and block fetch table */

	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&(h->ioaccel2_cmd_pool_dhandle));

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
		(h->ioaccel2_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	if (h->ioaccel2_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
	kfree(h->ioaccel2_blockFetchTable);
	return 1;
}

static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
					CFGTBL_Trans_use_short_tags;
	int i;

	if (hpsa_simple_mode)
		return;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_enable_directed_msix;
		if (hpsa_alloc_ioaccel_cmd_and_bft(h))
			goto clean_up;
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		transMethod |= CFGTBL_Trans_io_accel2 |
				CFGTBL_Trans_enable_directed_msix;
		if (ioaccel2_alloc_cmds_and_bft(h))
			goto clean_up;
	}

	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_queue_size = h->max_commands * sizeof(u64);

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
						h->reply_queue_size,
						&(h->reply_queue[i].busaddr));
		if (!h->reply_queue[i].head)
			goto clean_up;
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);
	if (!h->blockFetchTable)
		goto clean_up;

	hpsa_enter_performant_mode(h, trans_support);
	return;

clean_up:
	hpsa_free_reply_queues(h);
	kfree(h->blockFetchTable);
}

static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}

static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	int i, accel_cmds_out;
	int refcount;

	do { /* wait for all outstanding ioaccel commands to drain out */
		accel_cmds_out = 0;
		for (i = 0; i < h->nr_cmds; i++) {
			c = h->cmd_pool + i;
			refcount = atomic_inc_return(&c->refcount);
			if (refcount > 1) /* Command is allocated */
				accel_cmds_out += is_accelerated_cmd(c);
			cmd_free(h, c);
		}
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}
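/*
 * Note on the drain loop above: free commands in the pool idle at a
 * reference count of zero, so atomic_inc_return() yielding a value
 * greater than one means some other path currently owns the command;
 * cmd_free() just drops the extra reference taken here.  The loop
 * therefore polls every 100ms until no live command is an ioaccel
 * command.
 */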
7489 */ 7490 static int __init hpsa_init(void) 7491 { 7492 return pci_register_driver(&hpsa_pci_driver); 7493 } 7494 7495 static void __exit hpsa_cleanup(void) 7496 { 7497 pci_unregister_driver(&hpsa_pci_driver); 7498 } 7499 7500 static void __attribute__((unused)) verify_offsets(void) 7501 { 7502 #define VERIFY_OFFSET(member, offset) \ 7503 BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset) 7504 7505 VERIFY_OFFSET(structure_size, 0); 7506 VERIFY_OFFSET(volume_blk_size, 4); 7507 VERIFY_OFFSET(volume_blk_cnt, 8); 7508 VERIFY_OFFSET(phys_blk_shift, 16); 7509 VERIFY_OFFSET(parity_rotation_shift, 17); 7510 VERIFY_OFFSET(strip_size, 18); 7511 VERIFY_OFFSET(disk_starting_blk, 20); 7512 VERIFY_OFFSET(disk_blk_cnt, 28); 7513 VERIFY_OFFSET(data_disks_per_row, 36); 7514 VERIFY_OFFSET(metadata_disks_per_row, 38); 7515 VERIFY_OFFSET(row_cnt, 40); 7516 VERIFY_OFFSET(layout_map_count, 42); 7517 VERIFY_OFFSET(flags, 44); 7518 VERIFY_OFFSET(dekindex, 46); 7519 /* VERIFY_OFFSET(reserved, 48 */ 7520 VERIFY_OFFSET(data, 64); 7521 7522 #undef VERIFY_OFFSET 7523 7524 #define VERIFY_OFFSET(member, offset) \ 7525 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset) 7526 7527 VERIFY_OFFSET(IU_type, 0); 7528 VERIFY_OFFSET(direction, 1); 7529 VERIFY_OFFSET(reply_queue, 2); 7530 /* VERIFY_OFFSET(reserved1, 3); */ 7531 VERIFY_OFFSET(scsi_nexus, 4); 7532 VERIFY_OFFSET(Tag, 8); 7533 VERIFY_OFFSET(cdb, 16); 7534 VERIFY_OFFSET(cciss_lun, 32); 7535 VERIFY_OFFSET(data_len, 40); 7536 VERIFY_OFFSET(cmd_priority_task_attr, 44); 7537 VERIFY_OFFSET(sg_count, 45); 7538 /* VERIFY_OFFSET(reserved3 */ 7539 VERIFY_OFFSET(err_ptr, 48); 7540 VERIFY_OFFSET(err_len, 56); 7541 /* VERIFY_OFFSET(reserved4 */ 7542 VERIFY_OFFSET(sg, 64); 7543 7544 #undef VERIFY_OFFSET 7545 7546 #define VERIFY_OFFSET(member, offset) \ 7547 BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset) 7548 7549 VERIFY_OFFSET(dev_handle, 0x00); 7550 VERIFY_OFFSET(reserved1, 0x02); 7551 VERIFY_OFFSET(function, 0x03); 7552 VERIFY_OFFSET(reserved2, 0x04); 7553 VERIFY_OFFSET(err_info, 0x0C); 7554 VERIFY_OFFSET(reserved3, 0x10); 7555 VERIFY_OFFSET(err_info_len, 0x12); 7556 VERIFY_OFFSET(reserved4, 0x13); 7557 VERIFY_OFFSET(sgl_offset, 0x14); 7558 VERIFY_OFFSET(reserved5, 0x15); 7559 VERIFY_OFFSET(transfer_len, 0x1C); 7560 VERIFY_OFFSET(reserved6, 0x20); 7561 VERIFY_OFFSET(io_flags, 0x24); 7562 VERIFY_OFFSET(reserved7, 0x26); 7563 VERIFY_OFFSET(LUN, 0x34); 7564 VERIFY_OFFSET(control, 0x3C); 7565 VERIFY_OFFSET(CDB, 0x40); 7566 VERIFY_OFFSET(reserved8, 0x50); 7567 VERIFY_OFFSET(host_context_flags, 0x60); 7568 VERIFY_OFFSET(timeout_sec, 0x62); 7569 VERIFY_OFFSET(ReplyQueue, 0x64); 7570 VERIFY_OFFSET(reserved9, 0x65); 7571 VERIFY_OFFSET(tag, 0x68); 7572 VERIFY_OFFSET(host_addr, 0x70); 7573 VERIFY_OFFSET(CISS_LUN, 0x78); 7574 VERIFY_OFFSET(SG, 0x78 + 8); 7575 #undef VERIFY_OFFSET 7576 } 7577 7578 module_init(hpsa_init); 7579 module_exit(hpsa_cleanup); 7580