/*
 * Disk Array driver for HP Smart Array SAS controllers
 * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
	"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array", &SA5_access},
	{0x21BE103C, "Smart Array", &SA5_access},
	{0x21BF103C, "Smart Array", &SA5_access},
	{0x21C0103C, "Smart Array", &SA5_access},
	{0x21C1103C, "Smart Array", &SA5_access},
	{0x21C2103C, "Smart Array", &SA5_access},
	{0x21C3103C, "Smart Array", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array", &SA5_access},
	{0x21C6103C, "Smart Array", &SA5_access},
	{0x21C7103C, "Smart Array", &SA5_access},
	{0x21C8103C, "Smart Array", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array", &SA5_access},
	{0x21CB103C, "Smart Array", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart Array", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
#define VPD_PAGE (1 << 8)

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, int *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev, HPSA "%d: LUN failure "
			"detected, action required\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: report LUN data "
			"changed, action required\n", h->ctlr);
required\n", h->ctlr); 281 /* 282 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external 283 * target (array) devices. 284 */ 285 break; 286 case POWER_OR_RESET: 287 dev_warn(&h->pdev->dev, HPSA "%d: a power on " 288 "or device reset detected\n", h->ctlr); 289 break; 290 case UNIT_ATTENTION_CLEARED: 291 dev_warn(&h->pdev->dev, HPSA "%d: unit attention " 292 "cleared by another initiator\n", h->ctlr); 293 break; 294 default: 295 dev_warn(&h->pdev->dev, HPSA "%d: unknown " 296 "unit attention detected\n", h->ctlr); 297 break; 298 } 299 return 1; 300 } 301 302 static int check_for_busy(struct ctlr_info *h, struct CommandList *c) 303 { 304 if (c->err_info->CommandStatus != CMD_TARGET_STATUS || 305 (c->err_info->ScsiStatus != SAM_STAT_BUSY && 306 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL)) 307 return 0; 308 dev_warn(&h->pdev->dev, HPSA "device busy"); 309 return 1; 310 } 311 312 static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev, 313 struct device_attribute *attr, 314 const char *buf, size_t count) 315 { 316 int status, len; 317 struct ctlr_info *h; 318 struct Scsi_Host *shost = class_to_shost(dev); 319 char tmpbuf[10]; 320 321 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 322 return -EACCES; 323 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count; 324 strncpy(tmpbuf, buf, len); 325 tmpbuf[len] = '\0'; 326 if (sscanf(tmpbuf, "%d", &status) != 1) 327 return -EINVAL; 328 h = shost_to_hba(shost); 329 h->acciopath_status = !!status; 330 dev_warn(&h->pdev->dev, 331 "hpsa: HP SSD Smart Path %s via sysfs update.\n", 332 h->acciopath_status ? "enabled" : "disabled"); 333 return count; 334 } 335 336 static ssize_t host_store_raid_offload_debug(struct device *dev, 337 struct device_attribute *attr, 338 const char *buf, size_t count) 339 { 340 int debug_level, len; 341 struct ctlr_info *h; 342 struct Scsi_Host *shost = class_to_shost(dev); 343 char tmpbuf[10]; 344 345 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 346 return -EACCES; 347 len = count > sizeof(tmpbuf) - 1 ? 
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ?  "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* SmartArray P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"1(ADM)", "UNKNOWN"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
			host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);

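/*
 * Added commentary: the attributes above are exported through sysfs by the
 * SCSI midlayer.  The shost attributes (listed in hpsa_shost_attrs below)
 * appear under /sys/class/scsi_host/host<N>/, so, for example, writing to
 * the rescan attribute ("echo 1 > /sys/class/scsi_host/host0/rescan", host
 * number illustrative) ends up in hpsa_scan_start().  The sdev attributes
 * (hpsa_sdev_attrs below) appear in each SCSI device's sysfs directory.
 */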
static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	NULL,
};

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs		= hpsa_sdev_attrs,
	.shost_attrs		= hpsa_shost_attrs,
	.max_sectors		= 8192,
	.no_write_same		= 1,
};


/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
	list_add_tail(&c->list, list);
}

static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_pool *rq = &h->reply_queue[q];
	unsigned long flags;

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		spin_lock_irqsave(&h->lock, flags);
		h->commands_outstanding--;
		spin_unlock_irqrestore(&h->lock, flags);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */

/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (likely(h->msix_vector > 0))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
	}
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c);
		break;
	default:
		set_performant_mode(h, c);
	}
	dial_down_lockup_detection_during_fw_flash(h, c);
	spin_lock_irqsave(&h->lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	spin_unlock_irqrestore(&h->lock, flags);
	start_io(h);
}

static inline void removeQ(struct CommandList *c)
{
	if (WARN_ON(list_empty(&c->list)))
		return;
	list_del_init(&c->list);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
923 */ 924 if (device->scsi3addr[4] == 0) { 925 /* This is not a non-zero lun of a multi-lun device */ 926 if (hpsa_find_target_lun(h, device->scsi3addr, 927 device->bus, &device->target, &device->lun) != 0) 928 return -1; 929 goto lun_assigned; 930 } 931 932 /* This is a non-zero lun of a multi-lun device. 933 * Search through our list and find the device which 934 * has the same 8 byte LUN address, excepting byte 4. 935 * Assign the same bus and target for this new LUN. 936 * Use the logical unit number from the firmware. 937 */ 938 memcpy(addr1, device->scsi3addr, 8); 939 addr1[4] = 0; 940 for (i = 0; i < n; i++) { 941 sd = h->dev[i]; 942 memcpy(addr2, sd->scsi3addr, 8); 943 addr2[4] = 0; 944 /* differ only in byte 4? */ 945 if (memcmp(addr1, addr2, 8) == 0) { 946 device->bus = sd->bus; 947 device->target = sd->target; 948 device->lun = device->scsi3addr[4]; 949 break; 950 } 951 } 952 if (device->lun == -1) { 953 dev_warn(&h->pdev->dev, "physical device with no LUN=0," 954 " suspect firmware bug or unsupported hardware " 955 "configuration.\n"); 956 return -1; 957 } 958 959 lun_assigned: 960 961 h->dev[n] = device; 962 h->ndevices++; 963 added[*nadded] = device; 964 (*nadded)++; 965 966 /* initially, (before registering with scsi layer) we don't 967 * know our hostno and we don't want to print anything first 968 * time anyway (the scsi layer's inquiries will show that info) 969 */ 970 /* if (hostno != -1) */ 971 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n", 972 scsi_device_type(device->devtype), hostno, 973 device->bus, device->target, device->lun); 974 return 0; 975 } 976 977 /* Update an entry in h->dev[] array. */ 978 static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno, 979 int entry, struct hpsa_scsi_dev_t *new_entry) 980 { 981 /* assumes h->devlock is held */ 982 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); 983 984 /* Raid level changed. */ 985 h->dev[entry]->raid_level = new_entry->raid_level; 986 987 /* Raid offload parameters changed. */ 988 h->dev[entry]->offload_config = new_entry->offload_config; 989 h->dev[entry]->offload_enabled = new_entry->offload_enabled; 990 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; 991 h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror; 992 h->dev[entry]->raid_map = new_entry->raid_map; 993 994 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n", 995 scsi_device_type(new_entry->devtype), hostno, new_entry->bus, 996 new_entry->target, new_entry->lun); 997 } 998 999 /* Replace an entry from h->dev[] array. */ 1000 static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno, 1001 int entry, struct hpsa_scsi_dev_t *new_entry, 1002 struct hpsa_scsi_dev_t *added[], int *nadded, 1003 struct hpsa_scsi_dev_t *removed[], int *nremoved) 1004 { 1005 /* assumes h->devlock is held */ 1006 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); 1007 removed[*nremoved] = h->dev[entry]; 1008 (*nremoved)++; 1009 1010 /* 1011 * New physical devices won't have target/lun assigned yet 1012 * so we need to preserve the values in the slot we are replacing. 1013 */ 1014 if (new_entry->target == -1) { 1015 new_entry->target = h->dev[entry]->target; 1016 new_entry->lun = h->dev[entry]->lun; 1017 } 1018 1019 h->dev[entry] = new_entry; 1020 added[*nadded] = new_entry; 1021 (*nadded)++; 1022 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n", 1023 scsi_device_type(new_entry->devtype), hostno, new_entry->bus, 1024 new_entry->target, new_entry->lun); 1025 } 1026 1027 /* Remove an entry from h->dev[] array. 
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}

#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}

static void hpsa_monitor_offline_device(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
		return;
	}
	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}

/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
				"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
				h->scsi_host->host_no,
				sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}

static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */

	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;

		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
		 * as the SCSI mid-layer does not handle such devices well.
		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
		 * at 160Hz, and prevents the system from coming up.
		 */
		if (sd[i]->volume_offline) {
			hpsa_show_volume_status(h, sd[i]);
			dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
				h->scsi_host->host_no,
				sd[i]->bus, sd[i]->target, sd[i]->lun);
			continue;
		}

		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Monitor devices which are in one of several NOT READY states to be
	 * brought online later. This must be done without holding h->devlock,
	 * so don't touch h->dev[]
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		if (sd[i]->volume_offline)
			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
	}

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * future cmds to this device will get selection
			 * timeout as if the device was gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				" for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (sd != NULL)
		sdev->hostdata = sd;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}

static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
				GFP_KERNEL);
	if (!h->cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
						h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}

static int hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = HPSA_SG_CHAIN;
	chain_sg->Len = sizeof(*chain_sg) *
	    (c->Header.SGTotal - h->max_cmd_sg_entries);
	temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		chain_sg->Addr.lower = 0;
		chain_sg->Addr.upper = 0;
		return -1;
	}
	chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
	chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
	return 0;
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;
	union u64bit temp64;

	if (c->Header.SGTotal <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	temp64.val32.lower = chain_sg->Addr.lower;
	temp64.val32.upper = chain_sg->Addr.upper;
	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}


/* Decode the various types of errors on ioaccel2 path.
 * Return 1 for any error that should generate a RAID path retry.
 * Return 0 for errors that don't require a RAID path retry.
 */
static int handle_ioaccel_mode2_error(struct ctlr_info *h,
					struct CommandList *c,
					struct scsi_cmnd *cmd,
					struct io_accel2_cmd *c2)
{
	int data_len;
	int retry = 0;

	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_COMPLETE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
			dev_warn(&h->pdev->dev,
				"%s: task complete with check condition.\n",
				"HP SSD Smart Path");
			if (c2->error_data.data_present !=
					IOACCEL2_SENSE_DATA_PRESENT)
				break;
			/* copy the sense data */
			data_len = c2->error_data.sense_data_len;
			if (data_len > SCSI_SENSE_BUFFERSIZE)
				data_len = SCSI_SENSE_BUFFERSIZE;
			if (data_len > sizeof(c2->error_data.sense_data_buff))
				data_len =
					sizeof(c2->error_data.sense_data_buff);
			memcpy(cmd->sense_buffer,
				c2->error_data.sense_data_buff, data_len);
			cmd->result |= SAM_STAT_CHECK_CONDITION;
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
			dev_warn(&h->pdev->dev,
				"%s: task complete with BUSY status.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
			dev_warn(&h->pdev->dev,
				"%s: task complete with reservation conflict.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
			/* Make scsi midlayer do unlimited retries */
			cmd->result = DID_IMM_RETRY << 16;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
			dev_warn(&h->pdev->dev,
				"%s: task complete with aborted status.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		default:
			dev_warn(&h->pdev->dev,
				"%s: task complete with unrecognized status: 0x%02x\n",
				"HP SSD Smart Path", c2->error_data.status);
			retry = 1;
			break;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_FAILURE:
		/* don't expect to get here. */
		dev_warn(&h->pdev->dev,
			"unexpected delivery or target failure, status = 0x%02x\n",
			c2->error_data.status);
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
		dev_warn(&h->pdev->dev, "task management function rejected.\n");
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: Unrecognized server response: 0x%02x\n",
			"HP SSD Smart Path",
			c2->error_data.serv_response);
		retry = 1;
		break;
	}

	return retry;	/* retry on raid path? */
}

static void process_ioaccel2_completion(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		struct hpsa_scsi_dev_t *dev)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	int raid_retry = 0;

	/* check for good status */
	if (likely(c2->error_data.serv_response == 0 &&
			c2->error_data.status == 0)) {
		cmd_free(h, c);
		cmd->scsi_done(cmd);
		return;
	}

	/* Any RAID offload error results in retry which will use
	 * the normal I/O path so the controller can handle whatever's
	 * wrong.
	 */
	if (is_logical_dev_addr_mode(dev->scsi3addr) &&
		c2->error_data.serv_response ==
			IOACCEL2_SERV_RESPONSE_FAILURE) {
		if (c2->error_data.status ==
			IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
			dev_warn(&h->pdev->dev,
				"%s: Path is unavailable, retrying on standard path.\n",
				"HP SSD Smart Path");
		else
			dev_warn(&h->pdev->dev,
				"%s: Error 0x%02x, retrying on standard path.\n",
				"HP SSD Smart Path", c2->error_data.status);

		dev->offload_enabled = 0;
		h->drv_req_rescan = 1;	/* schedule controller for a rescan */
		cmd->result = DID_SOFT_ERROR << 16;
		cmd_free(h, c);
		cmd->scsi_done(cmd);
		return;
	}
	raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2);
	/* If error found, disable Smart Path, schedule a rescan,
	 * and force a retry on the standard path.
	 */
	if (raid_retry) {
		dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n",
			"HP SSD Smart Path");
		dev->offload_enabled = 0; /* Disable Smart Path */
		h->drv_req_rescan = 1;	  /* schedule controller rescan */
		cmd->result = DID_SOFT_ERROR << 16;
	}
	cmd_free(h, c);
	cmd->scsi_done(cmd);
}

static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;
	struct hpsa_scsi_dev_t *dev;

	unsigned char sense_key;
	unsigned char asc;      /* additional sense code */
	unsigned char ascq;     /* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
	h = cp->h;
	dev = cmd->device->hostdata;

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if ((cp->cmd_type == CMD_SCSI) &&
		(cp->Header.SGTotal > h->max_cmd_sg_entries))
		hpsa_unmap_sg_chain_block(h, cp);

	cmd->result = (DID_OK << 16); 		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */

	if (cp->cmd_type == CMD_IOACCEL2)
		return process_ioaccel2_completion(h, cp, cmd, dev);

	cmd->result |= ei->ScsiStatus;

	/* copy the sense data whether we need to or not. */
	if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
		sense_data_size = SCSI_SENSE_BUFFERSIZE;
	else
		sense_data_size = sizeof(ei->SenseInfo);
	if (ei->SenseLen < sense_data_size)
		sense_data_size = ei->SenseLen;

	memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
	scsi_set_resid(cmd, ei->ResidualCnt);

	if (ei->CommandStatus == 0) {
		cmd_free(h, cp);
		cmd->scsi_done(cmd);
		return;
	}

	/* For I/O accelerator commands, copy over some fields to the normal
	 * CISS header used below for error handling.
	 */
	if (cp->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
		cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd);
		cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK;
		cp->Header.Tag.lower = c->Tag.lower;
		cp->Header.Tag.upper = c->Tag.upper;
		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);

		/* Any RAID offload error results in retry which will use
		 * the normal I/O path so the controller can handle whatever's
		 * wrong.
		 */
1748 */ 1749 if (is_logical_dev_addr_mode(dev->scsi3addr)) { 1750 if (ei->CommandStatus == CMD_IOACCEL_DISABLED) 1751 dev->offload_enabled = 0; 1752 cmd->result = DID_SOFT_ERROR << 16; 1753 cmd_free(h, cp); 1754 cmd->scsi_done(cmd); 1755 return; 1756 } 1757 } 1758 1759 /* an error has occurred */ 1760 switch (ei->CommandStatus) { 1761 1762 case CMD_TARGET_STATUS: 1763 if (ei->ScsiStatus) { 1764 /* Get sense key */ 1765 sense_key = 0xf & ei->SenseInfo[2]; 1766 /* Get additional sense code */ 1767 asc = ei->SenseInfo[12]; 1768 /* Get addition sense code qualifier */ 1769 ascq = ei->SenseInfo[13]; 1770 } 1771 1772 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { 1773 if (check_for_unit_attention(h, cp)) 1774 break; 1775 if (sense_key == ILLEGAL_REQUEST) { 1776 /* 1777 * SCSI REPORT_LUNS is commonly unsupported on 1778 * Smart Array. Suppress noisy complaint. 1779 */ 1780 if (cp->Request.CDB[0] == REPORT_LUNS) 1781 break; 1782 1783 /* If ASC/ASCQ indicate Logical Unit 1784 * Not Supported condition, 1785 */ 1786 if ((asc == 0x25) && (ascq == 0x0)) { 1787 dev_warn(&h->pdev->dev, "cp %p " 1788 "has check condition\n", cp); 1789 break; 1790 } 1791 } 1792 1793 if (sense_key == NOT_READY) { 1794 /* If Sense is Not Ready, Logical Unit 1795 * Not ready, Manual Intervention 1796 * required 1797 */ 1798 if ((asc == 0x04) && (ascq == 0x03)) { 1799 dev_warn(&h->pdev->dev, "cp %p " 1800 "has check condition: unit " 1801 "not ready, manual " 1802 "intervention required\n", cp); 1803 break; 1804 } 1805 } 1806 if (sense_key == ABORTED_COMMAND) { 1807 /* Aborted command is retryable */ 1808 dev_warn(&h->pdev->dev, "cp %p " 1809 "has check condition: aborted command: " 1810 "ASC: 0x%x, ASCQ: 0x%x\n", 1811 cp, asc, ascq); 1812 cmd->result |= DID_SOFT_ERROR << 16; 1813 break; 1814 } 1815 /* Must be some other type of check condition */ 1816 dev_dbg(&h->pdev->dev, "cp %p has check condition: " 1817 "unknown type: " 1818 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " 1819 "Returning result: 0x%x, " 1820 "cmd=[%02x %02x %02x %02x %02x " 1821 "%02x %02x %02x %02x %02x %02x " 1822 "%02x %02x %02x %02x %02x]\n", 1823 cp, sense_key, asc, ascq, 1824 cmd->result, 1825 cmd->cmnd[0], cmd->cmnd[1], 1826 cmd->cmnd[2], cmd->cmnd[3], 1827 cmd->cmnd[4], cmd->cmnd[5], 1828 cmd->cmnd[6], cmd->cmnd[7], 1829 cmd->cmnd[8], cmd->cmnd[9], 1830 cmd->cmnd[10], cmd->cmnd[11], 1831 cmd->cmnd[12], cmd->cmnd[13], 1832 cmd->cmnd[14], cmd->cmnd[15]); 1833 break; 1834 } 1835 1836 1837 /* Problem was not a check condition 1838 * Pass it up to the upper layers... 1839 */ 1840 if (ei->ScsiStatus) { 1841 dev_warn(&h->pdev->dev, "cp %p has status 0x%x " 1842 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " 1843 "Returning result: 0x%x\n", 1844 cp, ei->ScsiStatus, 1845 sense_key, asc, ascq, 1846 cmd->result); 1847 } else { /* scsi status is zero??? How??? */ 1848 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. " 1849 "Returning no connection.\n", cp), 1850 1851 /* Ordinarily, this case should never happen, 1852 * but there is a bug in some released firmware 1853 * revisions that allows it to happen if, for 1854 * example, a 4100 backplane loses power and 1855 * the tape drive is in it. We assume that 1856 * it's a fatal error of some kind because we 1857 * can't show that it wasn't. We will make it 1858 * look like selection timeout since that is 1859 * the most common reason for this to occur, 1860 * and it's severe enough. 1861 */ 1862 1863 cmd->result = DID_NO_CONNECT << 16; 1864 } 1865 break; 1866 1867 case CMD_DATA_UNDERRUN: /* let mid layer handle it. 
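 * The residual byte count was already passed up via scsi_set_resid()
 * earlier in this function, so an underrun needs no extra handling here.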
 */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev, "cp %p has"
			" completed with data overrun "
			"reported\n", cp);
		break;
	case CMD_INVALID: {
		/* print_bytes(cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response). You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target. */
		cmd->result = DID_NO_CONNECT << 16;
	}
		break;
	case CMD_PROTOCOL_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p has "
			"protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		cmd->result = DID_ABORT << 16;
		dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
			cp, ei->ScsiStatus);
		break;
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
		dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
			"abort\n", cp);
		break;
	case CMD_TIMEOUT:
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "Command unabortable\n");
		break;
	case CMD_IOACCEL_DISABLED:
		/* This only handles the direct pass-through case since RAID
		 * offload is handled above. Just attempt a retry.
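		 * DID_SOFT_ERROR below makes the midlayer resubmit the
		 * command; the RAID-offload variants of this condition were
		 * already dealt with by process_ioaccel2_completion() and
		 * the CMD_IOACCEL1 block above.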
1924 */ 1925 cmd->result = DID_SOFT_ERROR << 16; 1926 dev_warn(&h->pdev->dev, 1927 "cp %p had HP SSD Smart Path error\n", cp); 1928 break; 1929 default: 1930 cmd->result = DID_ERROR << 16; 1931 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", 1932 cp, ei->CommandStatus); 1933 } 1934 cmd_free(h, cp); 1935 cmd->scsi_done(cmd); 1936 } 1937 1938 static void hpsa_pci_unmap(struct pci_dev *pdev, 1939 struct CommandList *c, int sg_used, int data_direction) 1940 { 1941 int i; 1942 union u64bit addr64; 1943 1944 for (i = 0; i < sg_used; i++) { 1945 addr64.val32.lower = c->SG[i].Addr.lower; 1946 addr64.val32.upper = c->SG[i].Addr.upper; 1947 pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len, 1948 data_direction); 1949 } 1950 } 1951 1952 static int hpsa_map_one(struct pci_dev *pdev, 1953 struct CommandList *cp, 1954 unsigned char *buf, 1955 size_t buflen, 1956 int data_direction) 1957 { 1958 u64 addr64; 1959 1960 if (buflen == 0 || data_direction == PCI_DMA_NONE) { 1961 cp->Header.SGList = 0; 1962 cp->Header.SGTotal = 0; 1963 return 0; 1964 } 1965 1966 addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction); 1967 if (dma_mapping_error(&pdev->dev, addr64)) { 1968 /* Prevent subsequent unmap of something never mapped */ 1969 cp->Header.SGList = 0; 1970 cp->Header.SGTotal = 0; 1971 return -1; 1972 } 1973 cp->SG[0].Addr.lower = 1974 (u32) (addr64 & (u64) 0x00000000FFFFFFFF); 1975 cp->SG[0].Addr.upper = 1976 (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF); 1977 cp->SG[0].Len = buflen; 1978 cp->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */ 1979 cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */ 1980 cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */ 1981 return 0; 1982 } 1983 1984 static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, 1985 struct CommandList *c) 1986 { 1987 DECLARE_COMPLETION_ONSTACK(wait); 1988 1989 c->waiting = &wait; 1990 enqueue_cmd_and_start_io(h, c); 1991 wait_for_completion(&wait); 1992 } 1993 1994 static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h, 1995 struct CommandList *c) 1996 { 1997 unsigned long flags; 1998 1999 /* If controller lockup detected, fake a hardware error. 
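 * The command is never queued in that case; its err_info is simply marked
 * CMD_HARDWARE_ERR, so callers see the same failure path as for a real
 * hardware error instead of blocking on a controller that will not respond.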
*/ 2000 spin_lock_irqsave(&h->lock, flags); 2001 if (unlikely(h->lockup_detected)) { 2002 spin_unlock_irqrestore(&h->lock, flags); 2003 c->err_info->CommandStatus = CMD_HARDWARE_ERR; 2004 } else { 2005 spin_unlock_irqrestore(&h->lock, flags); 2006 hpsa_scsi_do_simple_cmd_core(h, c); 2007 } 2008 } 2009 2010 #define MAX_DRIVER_CMD_RETRIES 25 2011 static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, 2012 struct CommandList *c, int data_direction) 2013 { 2014 int backoff_time = 10, retry_count = 0; 2015 2016 do { 2017 memset(c->err_info, 0, sizeof(*c->err_info)); 2018 hpsa_scsi_do_simple_cmd_core(h, c); 2019 retry_count++; 2020 if (retry_count > 3) { 2021 msleep(backoff_time); 2022 if (backoff_time < 1000) 2023 backoff_time *= 2; 2024 } 2025 } while ((check_for_unit_attention(h, c) || 2026 check_for_busy(h, c)) && 2027 retry_count <= MAX_DRIVER_CMD_RETRIES); 2028 hpsa_pci_unmap(h->pdev, c, 1, data_direction); 2029 } 2030 2031 static void hpsa_print_cmd(struct ctlr_info *h, char *txt, 2032 struct CommandList *c) 2033 { 2034 const u8 *cdb = c->Request.CDB; 2035 const u8 *lun = c->Header.LUN.LunAddrBytes; 2036 2037 dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x" 2038 " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", 2039 txt, lun[0], lun[1], lun[2], lun[3], 2040 lun[4], lun[5], lun[6], lun[7], 2041 cdb[0], cdb[1], cdb[2], cdb[3], 2042 cdb[4], cdb[5], cdb[6], cdb[7], 2043 cdb[8], cdb[9], cdb[10], cdb[11], 2044 cdb[12], cdb[13], cdb[14], cdb[15]); 2045 } 2046 2047 static void hpsa_scsi_interpret_error(struct ctlr_info *h, 2048 struct CommandList *cp) 2049 { 2050 const struct ErrorInfo *ei = cp->err_info; 2051 struct device *d = &cp->h->pdev->dev; 2052 const u8 *sd = ei->SenseInfo; 2053 2054 switch (ei->CommandStatus) { 2055 case CMD_TARGET_STATUS: 2056 hpsa_print_cmd(h, "SCSI status", cp); 2057 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) 2058 dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n", 2059 sd[2] & 0x0f, sd[12], sd[13]); 2060 else 2061 dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus); 2062 if (ei->ScsiStatus == 0) 2063 dev_warn(d, "SCSI status is abnormally zero. " 2064 "(probably indicates selection timeout " 2065 "reported incorrectly due to a known " 2066 "firmware bug, circa July, 2001.)\n"); 2067 break; 2068 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ 2069 break; 2070 case CMD_DATA_OVERRUN: 2071 hpsa_print_cmd(h, "overrun condition", cp); 2072 break; 2073 case CMD_INVALID: { 2074 /* controller unfortunately reports SCSI passthru's 2075 * to non-existent targets as invalid commands. 
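 * (complete_scsi_command() handles the same quirk for the I/O path by
 * mapping CMD_INVALID to DID_NO_CONNECT.)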
2076 */ 2077 hpsa_print_cmd(h, "invalid command", cp); 2078 dev_warn(d, "probably means device no longer present\n"); 2079 } 2080 break; 2081 case CMD_PROTOCOL_ERR: 2082 hpsa_print_cmd(h, "protocol error", cp); 2083 break; 2084 case CMD_HARDWARE_ERR: 2085 hpsa_print_cmd(h, "hardware error", cp); 2086 break; 2087 case CMD_CONNECTION_LOST: 2088 hpsa_print_cmd(h, "connection lost", cp); 2089 break; 2090 case CMD_ABORTED: 2091 hpsa_print_cmd(h, "aborted", cp); 2092 break; 2093 case CMD_ABORT_FAILED: 2094 hpsa_print_cmd(h, "abort failed", cp); 2095 break; 2096 case CMD_UNSOLICITED_ABORT: 2097 hpsa_print_cmd(h, "unsolicited abort", cp); 2098 break; 2099 case CMD_TIMEOUT: 2100 hpsa_print_cmd(h, "timed out", cp); 2101 break; 2102 case CMD_UNABORTABLE: 2103 hpsa_print_cmd(h, "unabortable", cp); 2104 break; 2105 default: 2106 hpsa_print_cmd(h, "unknown status", cp); 2107 dev_warn(d, "Unknown command status %x\n", 2108 ei->CommandStatus); 2109 } 2110 } 2111 2112 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, 2113 u16 page, unsigned char *buf, 2114 unsigned char bufsize) 2115 { 2116 int rc = IO_OK; 2117 struct CommandList *c; 2118 struct ErrorInfo *ei; 2119 2120 c = cmd_special_alloc(h); 2121 2122 if (c == NULL) { /* trouble... */ 2123 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 2124 return -ENOMEM; 2125 } 2126 2127 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, 2128 page, scsi3addr, TYPE_CMD)) { 2129 rc = -1; 2130 goto out; 2131 } 2132 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 2133 ei = c->err_info; 2134 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 2135 hpsa_scsi_interpret_error(h, c); 2136 rc = -1; 2137 } 2138 out: 2139 cmd_special_free(h, c); 2140 return rc; 2141 } 2142 2143 static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h, 2144 unsigned char *scsi3addr, unsigned char page, 2145 struct bmic_controller_parameters *buf, size_t bufsize) 2146 { 2147 int rc = IO_OK; 2148 struct CommandList *c; 2149 struct ErrorInfo *ei; 2150 2151 c = cmd_special_alloc(h); 2152 2153 if (c == NULL) { /* trouble... */ 2154 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 2155 return -ENOMEM; 2156 } 2157 2158 if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize, 2159 page, scsi3addr, TYPE_CMD)) { 2160 rc = -1; 2161 goto out; 2162 } 2163 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 2164 ei = c->err_info; 2165 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 2166 hpsa_scsi_interpret_error(h, c); 2167 rc = -1; 2168 } 2169 out: 2170 cmd_special_free(h, c); 2171 return rc; 2172 } 2173 2174 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, 2175 u8 reset_type) 2176 { 2177 int rc = IO_OK; 2178 struct CommandList *c; 2179 struct ErrorInfo *ei; 2180 2181 c = cmd_special_alloc(h); 2182 2183 if (c == NULL) { /* trouble... */ 2184 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 2185 return -ENOMEM; 2186 } 2187 2188 /* fill_cmd can't fail here, no data buffer to map. */ 2189 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, 2190 scsi3addr, TYPE_MSG); 2191 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */ 2192 hpsa_scsi_do_simple_cmd_core(h, c); 2193 /* no unmap needed here because no data xfer. 
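 * (The reset message was built by fill_cmd() with a NULL buffer and zero
 * length, so unlike hpsa_scsi_do_simple_cmd_with_retry() there is no
 * hpsa_pci_unmap() to perform afterwards.)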
 */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
	kfree(buf);
	return;
}

#define HPSA_MAP_DEBUG
#ifdef HPSA_MAP_DEBUG
static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
	struct raid_map_data *map_buff)
{
	struct raid_map_disk_data *dd = &map_buff->data[0];
	int map, row, col;
	u16 map_cnt, row_cnt, disks_per_row;

	if (rc != 0)
		return;

	/* Show details only if debugging has been activated. */
	if (h->raid_offload_debug < 2)
		return;

	dev_info(&h->pdev->dev, "structure_size = %u\n",
		le32_to_cpu(map_buff->structure_size));
	dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
		le32_to_cpu(map_buff->volume_blk_size));
	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
		le64_to_cpu(map_buff->volume_blk_cnt));
	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
		map_buff->phys_blk_shift);
	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
		map_buff->parity_rotation_shift);
	dev_info(&h->pdev->dev, "strip_size = %u\n",
		le16_to_cpu(map_buff->strip_size));
	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
		le64_to_cpu(map_buff->disk_starting_blk));
	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
		le64_to_cpu(map_buff->disk_blk_cnt));
	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
		le16_to_cpu(map_buff->data_disks_per_row));
	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
		le16_to_cpu(map_buff->metadata_disks_per_row));
	dev_info(&h->pdev->dev, "row_cnt = %u\n",
		le16_to_cpu(map_buff->row_cnt));
	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
		le16_to_cpu(map_buff->layout_map_count));
	dev_info(&h->pdev->dev, "flags = %u\n",
		le16_to_cpu(map_buff->flags));
	if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON)
		dev_info(&h->pdev->dev, "encryption = ON\n");
	else
		dev_info(&h->pdev->dev, "encryption = OFF\n");
	dev_info(&h->pdev->dev, "dekindex = %u\n",
		le16_to_cpu(map_buff->dekindex));

	map_cnt = le16_to_cpu(map_buff->layout_map_count);
	for (map = 0; map < map_cnt; map++) {
		dev_info(&h->pdev->dev, "Map%u:\n", map);
		row_cnt = le16_to_cpu(map_buff->row_cnt);
		for (row = 0; row < row_cnt; row++) {
			dev_info(&h->pdev->dev, " Row%u:\n", row);
			disks_per_row =
				le16_to_cpu(map_buff->data_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					" D%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
			disks_per_row =
				le16_to_cpu(map_buff->metadata_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					" M%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
		}
	}
}
#else
static void
hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h, 2297 __attribute__((unused)) int rc, 2298 __attribute__((unused)) struct raid_map_data *map_buff) 2299 { 2300 } 2301 #endif 2302 2303 static int hpsa_get_raid_map(struct ctlr_info *h, 2304 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) 2305 { 2306 int rc = 0; 2307 struct CommandList *c; 2308 struct ErrorInfo *ei; 2309 2310 c = cmd_special_alloc(h); 2311 if (c == NULL) { 2312 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 2313 return -ENOMEM; 2314 } 2315 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, 2316 sizeof(this_device->raid_map), 0, 2317 scsi3addr, TYPE_CMD)) { 2318 dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n"); 2319 cmd_special_free(h, c); 2320 return -ENOMEM; 2321 } 2322 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 2323 ei = c->err_info; 2324 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 2325 hpsa_scsi_interpret_error(h, c); 2326 cmd_special_free(h, c); 2327 return -1; 2328 } 2329 cmd_special_free(h, c); 2330 2331 /* @todo in the future, dynamically allocate RAID map memory */ 2332 if (le32_to_cpu(this_device->raid_map.structure_size) > 2333 sizeof(this_device->raid_map)) { 2334 dev_warn(&h->pdev->dev, "RAID map size is too large!\n"); 2335 rc = -1; 2336 } 2337 hpsa_debug_map_buff(h, rc, &this_device->raid_map); 2338 return rc; 2339 } 2340 2341 static int hpsa_vpd_page_supported(struct ctlr_info *h, 2342 unsigned char scsi3addr[], u8 page) 2343 { 2344 int rc; 2345 int i; 2346 int pages; 2347 unsigned char *buf, bufsize; 2348 2349 buf = kzalloc(256, GFP_KERNEL); 2350 if (!buf) 2351 return 0; 2352 2353 /* Get the size of the page list first */ 2354 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 2355 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, 2356 buf, HPSA_VPD_HEADER_SZ); 2357 if (rc != 0) 2358 goto exit_unsupported; 2359 pages = buf[3]; 2360 if ((pages + HPSA_VPD_HEADER_SZ) <= 255) 2361 bufsize = pages + HPSA_VPD_HEADER_SZ; 2362 else 2363 bufsize = 255; 2364 2365 /* Get the whole VPD page list */ 2366 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 2367 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, 2368 buf, bufsize); 2369 if (rc != 0) 2370 goto exit_unsupported; 2371 2372 pages = buf[3]; 2373 for (i = 1; i <= pages; i++) 2374 if (buf[3 + i] == page) 2375 goto exit_supported; 2376 exit_unsupported: 2377 kfree(buf); 2378 return 0; 2379 exit_supported: 2380 kfree(buf); 2381 return 1; 2382 } 2383 2384 static void hpsa_get_ioaccel_status(struct ctlr_info *h, 2385 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) 2386 { 2387 int rc; 2388 unsigned char *buf; 2389 u8 ioaccel_status; 2390 2391 this_device->offload_config = 0; 2392 this_device->offload_enabled = 0; 2393 2394 buf = kzalloc(64, GFP_KERNEL); 2395 if (!buf) 2396 return; 2397 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS)) 2398 goto out; 2399 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 2400 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64); 2401 if (rc != 0) 2402 goto out; 2403 2404 #define IOACCEL_STATUS_BYTE 4 2405 #define OFFLOAD_CONFIGURED_BIT 0x01 2406 #define OFFLOAD_ENABLED_BIT 0x02 2407 ioaccel_status = buf[IOACCEL_STATUS_BYTE]; 2408 this_device->offload_config = 2409 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); 2410 if (this_device->offload_config) { 2411 this_device->offload_enabled = 2412 !!(ioaccel_status & OFFLOAD_ENABLED_BIT); 2413 if (hpsa_get_raid_map(h, scsi3addr, this_device)) 2414 this_device->offload_enabled = 0; 2415 } 2416 out: 2417 
kfree(buf); 2418 return; 2419 } 2420 2421 /* Get the device id from inquiry page 0x83 */ 2422 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr, 2423 unsigned char *device_id, int buflen) 2424 { 2425 int rc; 2426 unsigned char *buf; 2427 2428 if (buflen > 16) 2429 buflen = 16; 2430 buf = kzalloc(64, GFP_KERNEL); 2431 if (!buf) 2432 return -1; 2433 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64); 2434 if (rc == 0) 2435 memcpy(device_id, &buf[8], buflen); 2436 kfree(buf); 2437 return rc != 0; 2438 } 2439 2440 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, 2441 struct ReportLUNdata *buf, int bufsize, 2442 int extended_response) 2443 { 2444 int rc = IO_OK; 2445 struct CommandList *c; 2446 unsigned char scsi3addr[8]; 2447 struct ErrorInfo *ei; 2448 2449 c = cmd_special_alloc(h); 2450 if (c == NULL) { /* trouble... */ 2451 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 2452 return -1; 2453 } 2454 /* address the controller */ 2455 memset(scsi3addr, 0, sizeof(scsi3addr)); 2456 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, 2457 buf, bufsize, 0, scsi3addr, TYPE_CMD)) { 2458 rc = -1; 2459 goto out; 2460 } 2461 if (extended_response) 2462 c->Request.CDB[1] = extended_response; 2463 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); 2464 ei = c->err_info; 2465 if (ei->CommandStatus != 0 && 2466 ei->CommandStatus != CMD_DATA_UNDERRUN) { 2467 hpsa_scsi_interpret_error(h, c); 2468 rc = -1; 2469 } else { 2470 if (buf->extended_response_flag != extended_response) { 2471 dev_err(&h->pdev->dev, 2472 "report luns requested format %u, got %u\n", 2473 extended_response, 2474 buf->extended_response_flag); 2475 rc = -1; 2476 } 2477 } 2478 out: 2479 cmd_special_free(h, c); 2480 return rc; 2481 } 2482 2483 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, 2484 struct ReportLUNdata *buf, 2485 int bufsize, int extended_response) 2486 { 2487 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response); 2488 } 2489 2490 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h, 2491 struct ReportLUNdata *buf, int bufsize) 2492 { 2493 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0); 2494 } 2495 2496 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device, 2497 int bus, int target, int lun) 2498 { 2499 device->bus = bus; 2500 device->target = target; 2501 device->lun = lun; 2502 } 2503 2504 /* Use VPD inquiry to get details of volume status */ 2505 static int hpsa_get_volume_status(struct ctlr_info *h, 2506 unsigned char scsi3addr[]) 2507 { 2508 int rc; 2509 int status; 2510 int size; 2511 unsigned char *buf; 2512 2513 buf = kzalloc(64, GFP_KERNEL); 2514 if (!buf) 2515 return HPSA_VPD_LV_STATUS_UNSUPPORTED; 2516 2517 /* Does controller have VPD for logical volume status? 
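 * The page is then read in two steps: a short header INQUIRY to learn the
 * page length (byte 3 of the returned data), followed by a full-size
 * INQUIRY whose byte 4 carries the logical volume status code.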
*/ 2518 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) { 2519 dev_warn(&h->pdev->dev, "Logical volume status VPD page is unsupported.\n"); 2520 goto exit_failed; 2521 } 2522 2523 /* Get the size of the VPD return buffer */ 2524 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, 2525 buf, HPSA_VPD_HEADER_SZ); 2526 if (rc != 0) { 2527 dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n"); 2528 goto exit_failed; 2529 } 2530 size = buf[3]; 2531 2532 /* Now get the whole VPD buffer */ 2533 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, 2534 buf, size + HPSA_VPD_HEADER_SZ); 2535 if (rc != 0) { 2536 dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n"); 2537 goto exit_failed; 2538 } 2539 status = buf[4]; /* status byte */ 2540 2541 kfree(buf); 2542 return status; 2543 exit_failed: 2544 kfree(buf); 2545 return HPSA_VPD_LV_STATUS_UNSUPPORTED; 2546 } 2547 2548 /* Determine offline status of a volume. 2549 * Return either: 2550 * 0 (not offline) 2551 * -1 (offline for unknown reasons) 2552 * # (integer code indicating one of several NOT READY states 2553 * describing why a volume is to be kept offline) 2554 */ 2555 static unsigned char hpsa_volume_offline(struct ctlr_info *h, 2556 unsigned char scsi3addr[]) 2557 { 2558 struct CommandList *c; 2559 unsigned char *sense, sense_key, asc, ascq; 2560 int ldstat = 0; 2561 u16 cmd_status; 2562 u8 scsi_status; 2563 #define ASC_LUN_NOT_READY 0x04 2564 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04 2565 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02 2566 2567 c = cmd_alloc(h); 2568 if (!c) 2569 return 0; 2570 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); 2571 hpsa_scsi_do_simple_cmd_core(h, c); 2572 sense = c->err_info->SenseInfo; 2573 sense_key = sense[2]; 2574 asc = sense[12]; 2575 ascq = sense[13]; 2576 cmd_status = c->err_info->CommandStatus; 2577 scsi_status = c->err_info->ScsiStatus; 2578 cmd_free(h, c); 2579 /* Is the volume 'not ready'? 
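 * Only a TEST UNIT READY that failed with CHECK CONDITION, sense key
 * NOT READY and ASC 0x04 (logical unit not ready) is treated as a
 * potentially offline volume; any other outcome is reported as online (0).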
*/ 2580 if (cmd_status != CMD_TARGET_STATUS || 2581 scsi_status != SAM_STAT_CHECK_CONDITION || 2582 sense_key != NOT_READY || 2583 asc != ASC_LUN_NOT_READY) { 2584 return 0; 2585 } 2586 2587 /* Determine the reason for not ready state */ 2588 ldstat = hpsa_get_volume_status(h, scsi3addr); 2589 2590 /* Keep volume offline in certain cases: */ 2591 switch (ldstat) { 2592 case HPSA_LV_UNDERGOING_ERASE: 2593 case HPSA_LV_UNDERGOING_RPI: 2594 case HPSA_LV_PENDING_RPI: 2595 case HPSA_LV_ENCRYPTED_NO_KEY: 2596 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: 2597 case HPSA_LV_UNDERGOING_ENCRYPTION: 2598 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING: 2599 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: 2600 return ldstat; 2601 case HPSA_VPD_LV_STATUS_UNSUPPORTED: 2602 /* If VPD status page isn't available, 2603 * use ASC/ASCQ to determine state 2604 */ 2605 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) || 2606 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ)) 2607 return ldstat; 2608 break; 2609 default: 2610 break; 2611 } 2612 return 0; 2613 } 2614 2615 static int hpsa_update_device_info(struct ctlr_info *h, 2616 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, 2617 unsigned char *is_OBDR_device) 2618 { 2619 2620 #define OBDR_SIG_OFFSET 43 2621 #define OBDR_TAPE_SIG "$DR-10" 2622 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1) 2623 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN) 2624 2625 unsigned char *inq_buff; 2626 unsigned char *obdr_sig; 2627 2628 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); 2629 if (!inq_buff) 2630 goto bail_out; 2631 2632 /* Do an inquiry to the device to see what it is. */ 2633 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, 2634 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { 2635 /* Inquiry failed (msg printed already) */ 2636 dev_err(&h->pdev->dev, 2637 "hpsa_update_device_info: inquiry failed\n"); 2638 goto bail_out; 2639 } 2640 2641 this_device->devtype = (inq_buff[0] & 0x1f); 2642 memcpy(this_device->scsi3addr, scsi3addr, 8); 2643 memcpy(this_device->vendor, &inq_buff[8], 2644 sizeof(this_device->vendor)); 2645 memcpy(this_device->model, &inq_buff[16], 2646 sizeof(this_device->model)); 2647 memset(this_device->device_id, 0, 2648 sizeof(this_device->device_id)); 2649 hpsa_get_device_id(h, scsi3addr, this_device->device_id, 2650 sizeof(this_device->device_id)); 2651 2652 if (this_device->devtype == TYPE_DISK && 2653 is_logical_dev_addr_mode(scsi3addr)) { 2654 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); 2655 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) 2656 hpsa_get_ioaccel_status(h, scsi3addr, this_device); 2657 this_device->volume_offline = 2658 hpsa_volume_offline(h, scsi3addr); 2659 } else { 2660 this_device->raid_level = RAID_UNKNOWN; 2661 this_device->offload_config = 0; 2662 this_device->offload_enabled = 0; 2663 this_device->volume_offline = 0; 2664 } 2665 2666 if (is_OBDR_device) { 2667 /* See if this is a One-Button-Disaster-Recovery device 2668 * by looking for "$DR-10" at offset 43 in inquiry data. 
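 * The device must also identify itself as TYPE_ROM; the signature test is
 * a strncmp() of OBDR_SIG_LEN bytes against OBDR_TAPE_SIG.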
2669 */ 2670 obdr_sig = &inq_buff[OBDR_SIG_OFFSET]; 2671 *is_OBDR_device = (this_device->devtype == TYPE_ROM && 2672 strncmp(obdr_sig, OBDR_TAPE_SIG, 2673 OBDR_SIG_LEN) == 0); 2674 } 2675 2676 kfree(inq_buff); 2677 return 0; 2678 2679 bail_out: 2680 kfree(inq_buff); 2681 return 1; 2682 } 2683 2684 static unsigned char *ext_target_model[] = { 2685 "MSA2012", 2686 "MSA2024", 2687 "MSA2312", 2688 "MSA2324", 2689 "P2000 G3 SAS", 2690 "MSA 2040 SAS", 2691 NULL, 2692 }; 2693 2694 static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) 2695 { 2696 int i; 2697 2698 for (i = 0; ext_target_model[i]; i++) 2699 if (strncmp(device->model, ext_target_model[i], 2700 strlen(ext_target_model[i])) == 0) 2701 return 1; 2702 return 0; 2703 } 2704 2705 /* Helper function to assign bus, target, lun mapping of devices. 2706 * Puts non-external target logical volumes on bus 0, external target logical 2707 * volumes on bus 1, physical devices on bus 2. and the hba on bus 3. 2708 * Logical drive target and lun are assigned at this time, but 2709 * physical device lun and target assignment are deferred (assigned 2710 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.) 2711 */ 2712 static void figure_bus_target_lun(struct ctlr_info *h, 2713 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device) 2714 { 2715 u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes)); 2716 2717 if (!is_logical_dev_addr_mode(lunaddrbytes)) { 2718 /* physical device, target and lun filled in later */ 2719 if (is_hba_lunid(lunaddrbytes)) 2720 hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff); 2721 else 2722 /* defer target, lun assignment for physical devices */ 2723 hpsa_set_bus_target_lun(device, 2, -1, -1); 2724 return; 2725 } 2726 /* It's a logical device */ 2727 if (is_ext_target(h, device)) { 2728 /* external target way, put logicals on bus 1 2729 * and match target/lun numbers box 2730 * reports, other smart array, bus 0, target 0, match lunid 2731 */ 2732 hpsa_set_bus_target_lun(device, 2733 1, (lunid >> 16) & 0x3fff, lunid & 0x00ff); 2734 return; 2735 } 2736 hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff); 2737 } 2738 2739 /* 2740 * If there is no lun 0 on a target, linux won't find any devices. 2741 * For the external targets (arrays), we have to manually detect the enclosure 2742 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report 2743 * it for some reason. *tmpdevice is the target we're adding, 2744 * this_device is a pointer into the current element of currentsd[] 2745 * that we're building up in update_scsi_devices(), below. 2746 * lunzerobits is a bitmap that tracks which targets already have a 2747 * lun 0 assigned. 2748 * Returns 1 if an enclosure was added, 0 if not. 2749 */ 2750 static int add_ext_target_dev(struct ctlr_info *h, 2751 struct hpsa_scsi_dev_t *tmpdevice, 2752 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes, 2753 unsigned long lunzerobits[], int *n_ext_target_devs) 2754 { 2755 unsigned char scsi3addr[8]; 2756 2757 if (test_bit(tmpdevice->target, lunzerobits)) 2758 return 0; /* There is already a lun 0 on this target. */ 2759 2760 if (!is_logical_dev_addr_mode(lunaddrbytes)) 2761 return 0; /* It's the logical targets that may lack lun 0. */ 2762 2763 if (!is_ext_target(h, tmpdevice)) 2764 return 0; /* Only external target devices have this problem. */ 2765 2766 if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. 
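 * (i.e. the device being added already sits at lun 0, so no
 * enclosure entry needs to be fabricated for this target)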
*/ 2767 return 0; 2768 2769 memset(scsi3addr, 0, 8); 2770 scsi3addr[3] = tmpdevice->target; 2771 if (is_hba_lunid(scsi3addr)) 2772 return 0; /* Don't add the RAID controller here. */ 2773 2774 if (is_scsi_rev_5(h)) 2775 return 0; /* p1210m doesn't need to do this. */ 2776 2777 if (*n_ext_target_devs >= MAX_EXT_TARGETS) { 2778 dev_warn(&h->pdev->dev, "Maximum number of external " 2779 "target devices exceeded. Check your hardware " 2780 "configuration."); 2781 return 0; 2782 } 2783 2784 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL)) 2785 return 0; 2786 (*n_ext_target_devs)++; 2787 hpsa_set_bus_target_lun(this_device, 2788 tmpdevice->bus, tmpdevice->target, 0); 2789 set_bit(tmpdevice->target, lunzerobits); 2790 return 1; 2791 } 2792 2793 /* 2794 * Get address of physical disk used for an ioaccel2 mode command: 2795 * 1. Extract ioaccel2 handle from the command. 2796 * 2. Find a matching ioaccel2 handle from list of physical disks. 2797 * 3. Return: 2798 * 1 and set scsi3addr to address of matching physical 2799 * 0 if no matching physical disk was found. 2800 */ 2801 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, 2802 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr) 2803 { 2804 struct ReportExtendedLUNdata *physicals = NULL; 2805 int responsesize = 24; /* size of physical extended response */ 2806 int extended = 2; /* flag forces reporting 'other dev info'. */ 2807 int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize; 2808 u32 nphysicals = 0; /* number of reported physical devs */ 2809 int found = 0; /* found match (1) or not (0) */ 2810 u32 find; /* handle we need to match */ 2811 int i; 2812 struct scsi_cmnd *scmd; /* scsi command within request being aborted */ 2813 struct hpsa_scsi_dev_t *d; /* device of request being aborted */ 2814 struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */ 2815 u32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */ 2816 u32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */ 2817 2818 if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2) 2819 return 0; /* no match */ 2820 2821 /* point to the ioaccel2 device handle */ 2822 c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex]; 2823 if (c2a == NULL) 2824 return 0; /* no match */ 2825 2826 scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd; 2827 if (scmd == NULL) 2828 return 0; /* no match */ 2829 2830 d = scmd->device->hostdata; 2831 if (d == NULL) 2832 return 0; /* no match */ 2833 2834 it_nexus = cpu_to_le32((u32) d->ioaccel_handle); 2835 scsi_nexus = cpu_to_le32((u32) c2a->scsi_nexus); 2836 find = c2a->scsi_nexus; 2837 2838 if (h->raid_offload_debug > 0) 2839 dev_info(&h->pdev->dev, 2840 "%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n", 2841 __func__, scsi_nexus, 2842 d->device_id[0], d->device_id[1], d->device_id[2], 2843 d->device_id[3], d->device_id[4], d->device_id[5], 2844 d->device_id[6], d->device_id[7], d->device_id[8], 2845 d->device_id[9], d->device_id[10], d->device_id[11], 2846 d->device_id[12], d->device_id[13], d->device_id[14], 2847 d->device_id[15]); 2848 2849 /* Get the list of physical devices */ 2850 physicals = kzalloc(reportsize, GFP_KERNEL); 2851 if (physicals == NULL) 2852 return 0; 2853 if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals, 2854 reportsize, extended)) { 2855 dev_err(&h->pdev->dev, 2856 "Can't lookup %s device handle: report physical LUNs failed.\n", 2857 "HP SSD Smart Path"); 2858 
kfree(physicals); 2859 return 0; 2860 } 2861 nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) / 2862 responsesize; 2863 2864 2865 /* find ioaccel2 handle in list of physicals: */ 2866 for (i = 0; i < nphysicals; i++) { 2867 /* handle is in bytes 28-31 of each lun */ 2868 if (memcmp(&((struct ReportExtendedLUNdata *) 2869 physicals)->LUN[i][20], &find, 4) != 0) { 2870 continue; /* didn't match */ 2871 } 2872 found = 1; 2873 memcpy(scsi3addr, &((struct ReportExtendedLUNdata *) 2874 physicals)->LUN[i][0], 8); 2875 if (h->raid_offload_debug > 0) 2876 dev_info(&h->pdev->dev, 2877 "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 2878 __func__, find, 2879 ((struct ReportExtendedLUNdata *) 2880 physicals)->LUN[i][20], 2881 scsi3addr[0], scsi3addr[1], scsi3addr[2], 2882 scsi3addr[3], scsi3addr[4], scsi3addr[5], 2883 scsi3addr[6], scsi3addr[7]); 2884 break; /* found it */ 2885 } 2886 2887 kfree(physicals); 2888 if (found) 2889 return 1; 2890 else 2891 return 0; 2892 2893 } 2894 /* 2895 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev, 2896 * logdev. The number of luns in physdev and logdev are returned in 2897 * *nphysicals and *nlogicals, respectively. 2898 * Returns 0 on success, -1 otherwise. 2899 */ 2900 static int hpsa_gather_lun_info(struct ctlr_info *h, 2901 int reportlunsize, 2902 struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode, 2903 struct ReportLUNdata *logdev, u32 *nlogicals) 2904 { 2905 int physical_entry_size = 8; 2906 2907 *physical_mode = 0; 2908 2909 /* For I/O accelerator mode we need to read physical device handles */ 2910 if (h->transMethod & CFGTBL_Trans_io_accel1 || 2911 h->transMethod & CFGTBL_Trans_io_accel2) { 2912 *physical_mode = HPSA_REPORT_PHYS_EXTENDED; 2913 physical_entry_size = 24; 2914 } 2915 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 2916 *physical_mode)) { 2917 dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); 2918 return -1; 2919 } 2920 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 2921 physical_entry_size; 2922 if (*nphysicals > HPSA_MAX_PHYS_LUN) { 2923 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded." 2924 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, 2925 *nphysicals - HPSA_MAX_PHYS_LUN); 2926 *nphysicals = HPSA_MAX_PHYS_LUN; 2927 } 2928 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) { 2929 dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); 2930 return -1; 2931 } 2932 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8; 2933 /* Reject Logicals in excess of our max capability. */ 2934 if (*nlogicals > HPSA_MAX_LUN) { 2935 dev_warn(&h->pdev->dev, 2936 "maximum logical LUNs (%d) exceeded. " 2937 "%d LUNs ignored.\n", HPSA_MAX_LUN, 2938 *nlogicals - HPSA_MAX_LUN); 2939 *nlogicals = HPSA_MAX_LUN; 2940 } 2941 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) { 2942 dev_warn(&h->pdev->dev, 2943 "maximum logical + physical LUNs (%d) exceeded. 
" 2944 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, 2945 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN); 2946 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals; 2947 } 2948 return 0; 2949 } 2950 2951 u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, 2952 int nphysicals, int nlogicals, 2953 struct ReportExtendedLUNdata *physdev_list, 2954 struct ReportLUNdata *logdev_list) 2955 { 2956 /* Helper function, figure out where the LUN ID info is coming from 2957 * given index i, lists of physical and logical devices, where in 2958 * the list the raid controller is supposed to appear (first or last) 2959 */ 2960 2961 int logicals_start = nphysicals + (raid_ctlr_position == 0); 2962 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0); 2963 2964 if (i == raid_ctlr_position) 2965 return RAID_CTLR_LUNID; 2966 2967 if (i < logicals_start) 2968 return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0]; 2969 2970 if (i < last_device) 2971 return &logdev_list->LUN[i - nphysicals - 2972 (raid_ctlr_position == 0)][0]; 2973 BUG(); 2974 return NULL; 2975 } 2976 2977 static int hpsa_hba_mode_enabled(struct ctlr_info *h) 2978 { 2979 int rc; 2980 int hba_mode_enabled; 2981 struct bmic_controller_parameters *ctlr_params; 2982 ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters), 2983 GFP_KERNEL); 2984 2985 if (!ctlr_params) 2986 return -ENOMEM; 2987 rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params, 2988 sizeof(struct bmic_controller_parameters)); 2989 if (rc) { 2990 kfree(ctlr_params); 2991 return rc; 2992 } 2993 2994 hba_mode_enabled = 2995 ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0); 2996 kfree(ctlr_params); 2997 return hba_mode_enabled; 2998 } 2999 3000 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) 3001 { 3002 /* the idea here is we could get notified 3003 * that some devices have changed, so we do a report 3004 * physical luns and report logical luns cmd, and adjust 3005 * our list of devices accordingly. 3006 * 3007 * The scsi3addr's of devices won't change so long as the 3008 * adapter is not reset. That means we can rescan and 3009 * tell which devices we already know about, vs. new 3010 * devices, vs. disappearing devices. 
3011 */ 3012 struct ReportExtendedLUNdata *physdev_list = NULL; 3013 struct ReportLUNdata *logdev_list = NULL; 3014 u32 nphysicals = 0; 3015 u32 nlogicals = 0; 3016 int physical_mode = 0; 3017 u32 ndev_allocated = 0; 3018 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; 3019 int ncurrent = 0; 3020 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24; 3021 int i, n_ext_target_devs, ndevs_to_allocate; 3022 int raid_ctlr_position; 3023 int rescan_hba_mode; 3024 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS); 3025 3026 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); 3027 physdev_list = kzalloc(reportlunsize, GFP_KERNEL); 3028 logdev_list = kzalloc(reportlunsize, GFP_KERNEL); 3029 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); 3030 3031 if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) { 3032 dev_err(&h->pdev->dev, "out of memory\n"); 3033 goto out; 3034 } 3035 memset(lunzerobits, 0, sizeof(lunzerobits)); 3036 3037 rescan_hba_mode = hpsa_hba_mode_enabled(h); 3038 if (rescan_hba_mode < 0) 3039 goto out; 3040 3041 if (!h->hba_mode_enabled && rescan_hba_mode) 3042 dev_warn(&h->pdev->dev, "HBA mode enabled\n"); 3043 else if (h->hba_mode_enabled && !rescan_hba_mode) 3044 dev_warn(&h->pdev->dev, "HBA mode disabled\n"); 3045 3046 h->hba_mode_enabled = rescan_hba_mode; 3047 3048 if (hpsa_gather_lun_info(h, reportlunsize, 3049 (struct ReportLUNdata *) physdev_list, &nphysicals, 3050 &physical_mode, logdev_list, &nlogicals)) 3051 goto out; 3052 3053 /* We might see up to the maximum number of logical and physical disks 3054 * plus external target devices, and a device for the local RAID 3055 * controller. 3056 */ 3057 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1; 3058 3059 /* Allocate the per device structures */ 3060 for (i = 0; i < ndevs_to_allocate; i++) { 3061 if (i >= HPSA_MAX_DEVICES) { 3062 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded." 3063 " %d devices ignored.\n", HPSA_MAX_DEVICES, 3064 ndevs_to_allocate - HPSA_MAX_DEVICES); 3065 break; 3066 } 3067 3068 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL); 3069 if (!currentsd[i]) { 3070 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n", 3071 __FILE__, __LINE__); 3072 goto out; 3073 } 3074 ndev_allocated++; 3075 } 3076 3077 if (unlikely(is_scsi_rev_5(h))) 3078 raid_ctlr_position = 0; 3079 else 3080 raid_ctlr_position = nphysicals + nlogicals; 3081 3082 /* adjust our table of devices */ 3083 n_ext_target_devs = 0; 3084 for (i = 0; i < nphysicals + nlogicals + 1; i++) { 3085 u8 *lunaddrbytes, is_OBDR = 0; 3086 3087 /* Figure out where the LUN ID info is coming from */ 3088 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, 3089 i, nphysicals, nlogicals, physdev_list, logdev_list); 3090 /* skip masked physical devices. */ 3091 if (lunaddrbytes[3] & 0xC0 && 3092 i < nphysicals + (raid_ctlr_position == 0)) 3093 continue; 3094 3095 /* Get device type, vendor, model, device id */ 3096 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice, 3097 &is_OBDR)) 3098 continue; /* skip it if we can't talk to it. */ 3099 figure_bus_target_lun(h, lunaddrbytes, tmpdevice); 3100 this_device = currentsd[ncurrent]; 3101 3102 /* 3103 * For external target devices, we have to insert a LUN 0 which 3104 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there 3105 * is nonetheless an enclosure device there. We have to 3106 * present that otherwise linux won't find anything if 3107 * there is no lun 0. 
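 * add_ext_target_dev() below performs that insertion; when it adds an
 * enclosure it consumes one currentsd[] slot before the real device is
 * copied in.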
3108 */ 3109 if (add_ext_target_dev(h, tmpdevice, this_device, 3110 lunaddrbytes, lunzerobits, 3111 &n_ext_target_devs)) { 3112 ncurrent++; 3113 this_device = currentsd[ncurrent]; 3114 } 3115 3116 *this_device = *tmpdevice; 3117 3118 switch (this_device->devtype) { 3119 case TYPE_ROM: 3120 /* We don't *really* support actual CD-ROM devices, 3121 * just "One Button Disaster Recovery" tape drive 3122 * which temporarily pretends to be a CD-ROM drive. 3123 * So we check that the device is really an OBDR tape 3124 * device by checking for "$DR-10" in bytes 43-48 of 3125 * the inquiry data. 3126 */ 3127 if (is_OBDR) 3128 ncurrent++; 3129 break; 3130 case TYPE_DISK: 3131 if (h->hba_mode_enabled) { 3132 /* never use raid mapper in HBA mode */ 3133 this_device->offload_enabled = 0; 3134 ncurrent++; 3135 break; 3136 } else if (h->acciopath_status) { 3137 if (i >= nphysicals) { 3138 ncurrent++; 3139 break; 3140 } 3141 } else { 3142 if (i < nphysicals) 3143 break; 3144 ncurrent++; 3145 break; 3146 } 3147 if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) { 3148 memcpy(&this_device->ioaccel_handle, 3149 &lunaddrbytes[20], 3150 sizeof(this_device->ioaccel_handle)); 3151 ncurrent++; 3152 } 3153 break; 3154 case TYPE_TAPE: 3155 case TYPE_MEDIUM_CHANGER: 3156 ncurrent++; 3157 break; 3158 case TYPE_RAID: 3159 /* Only present the Smartarray HBA as a RAID controller. 3160 * If it's a RAID controller other than the HBA itself 3161 * (an external RAID controller, MSA500 or similar) 3162 * don't present it. 3163 */ 3164 if (!is_hba_lunid(lunaddrbytes)) 3165 break; 3166 ncurrent++; 3167 break; 3168 default: 3169 break; 3170 } 3171 if (ncurrent >= HPSA_MAX_DEVICES) 3172 break; 3173 } 3174 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); 3175 out: 3176 kfree(tmpdevice); 3177 for (i = 0; i < ndev_allocated; i++) 3178 kfree(currentsd[i]); 3179 kfree(currentsd); 3180 kfree(physdev_list); 3181 kfree(logdev_list); 3182 } 3183 3184 /* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci 3185 * dma mapping and fills in the scatter gather entries of the 3186 * hpsa command, cp. 3187 */ 3188 static int hpsa_scatter_gather(struct ctlr_info *h, 3189 struct CommandList *cp, 3190 struct scsi_cmnd *cmd) 3191 { 3192 unsigned int len; 3193 struct scatterlist *sg; 3194 u64 addr64; 3195 int use_sg, i, sg_index, chained; 3196 struct SGDescriptor *curr_sg; 3197 3198 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); 3199 3200 use_sg = scsi_dma_map(cmd); 3201 if (use_sg < 0) 3202 return use_sg; 3203 3204 if (!use_sg) 3205 goto sglist_finished; 3206 3207 curr_sg = cp->SG; 3208 chained = 0; 3209 sg_index = 0; 3210 scsi_for_each_sg(cmd, sg, use_sg, i) { 3211 if (i == h->max_cmd_sg_entries - 1 && 3212 use_sg > h->max_cmd_sg_entries) { 3213 chained = 1; 3214 curr_sg = h->cmd_sg_list[cp->cmdindex]; 3215 sg_index = 0; 3216 } 3217 addr64 = (u64) sg_dma_address(sg); 3218 len = sg_dma_len(sg); 3219 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); 3220 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); 3221 curr_sg->Len = len; 3222 curr_sg->Ext = (i < scsi_sg_count(cmd) - 1) ? 0 : HPSA_SG_LAST; 3223 curr_sg++; 3224 } 3225 3226 if (use_sg + chained > h->maxSG) 3227 h->maxSG = use_sg + chained; 3228 3229 if (chained) { 3230 cp->Header.SGList = h->max_cmd_sg_entries; 3231 cp->Header.SGTotal = (u16) (use_sg + 1); 3232 if (hpsa_map_sg_chain_block(h, cp)) { 3233 scsi_dma_unmap(cmd); 3234 return -1; 3235 } 3236 return 0; 3237 } 3238 3239 sglist_finished: 3240 3241 cp->Header.SGList = (u8) use_sg; /* no. 
SGs contig in this cmd */ 3242 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */ 3243 return 0; 3244 } 3245 3246 #define IO_ACCEL_INELIGIBLE (1) 3247 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len) 3248 { 3249 int is_write = 0; 3250 u32 block; 3251 u32 block_cnt; 3252 3253 /* Perform some CDB fixups if needed using 10 byte reads/writes only */ 3254 switch (cdb[0]) { 3255 case WRITE_6: 3256 case WRITE_12: 3257 is_write = 1; 3258 case READ_6: 3259 case READ_12: 3260 if (*cdb_len == 6) { 3261 block = (((u32) cdb[2]) << 8) | cdb[3]; 3262 block_cnt = cdb[4]; 3263 } else { 3264 BUG_ON(*cdb_len != 12); 3265 block = (((u32) cdb[2]) << 24) | 3266 (((u32) cdb[3]) << 16) | 3267 (((u32) cdb[4]) << 8) | 3268 cdb[5]; 3269 block_cnt = 3270 (((u32) cdb[6]) << 24) | 3271 (((u32) cdb[7]) << 16) | 3272 (((u32) cdb[8]) << 8) | 3273 cdb[9]; 3274 } 3275 if (block_cnt > 0xffff) 3276 return IO_ACCEL_INELIGIBLE; 3277 3278 cdb[0] = is_write ? WRITE_10 : READ_10; 3279 cdb[1] = 0; 3280 cdb[2] = (u8) (block >> 24); 3281 cdb[3] = (u8) (block >> 16); 3282 cdb[4] = (u8) (block >> 8); 3283 cdb[5] = (u8) (block); 3284 cdb[6] = 0; 3285 cdb[7] = (u8) (block_cnt >> 8); 3286 cdb[8] = (u8) (block_cnt); 3287 cdb[9] = 0; 3288 *cdb_len = 10; 3289 break; 3290 } 3291 return 0; 3292 } 3293 3294 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, 3295 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 3296 u8 *scsi3addr) 3297 { 3298 struct scsi_cmnd *cmd = c->scsi_cmd; 3299 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; 3300 unsigned int len; 3301 unsigned int total_len = 0; 3302 struct scatterlist *sg; 3303 u64 addr64; 3304 int use_sg, i; 3305 struct SGDescriptor *curr_sg; 3306 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE; 3307 3308 /* TODO: implement chaining support */ 3309 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) 3310 return IO_ACCEL_INELIGIBLE; 3311 3312 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX); 3313 3314 if (fixup_ioaccel_cdb(cdb, &cdb_len)) 3315 return IO_ACCEL_INELIGIBLE; 3316 3317 c->cmd_type = CMD_IOACCEL1; 3318 3319 /* Adjust the DMA address to point to the accelerated command buffer */ 3320 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle + 3321 (c->cmdindex * sizeof(*cp)); 3322 BUG_ON(c->busaddr & 0x0000007F); 3323 3324 use_sg = scsi_dma_map(cmd); 3325 if (use_sg < 0) 3326 return use_sg; 3327 3328 if (use_sg) { 3329 curr_sg = cp->SG; 3330 scsi_for_each_sg(cmd, sg, use_sg, i) { 3331 addr64 = (u64) sg_dma_address(sg); 3332 len = sg_dma_len(sg); 3333 total_len += len; 3334 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); 3335 curr_sg->Addr.upper = 3336 (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); 3337 curr_sg->Len = len; 3338 3339 if (i == (scsi_sg_count(cmd) - 1)) 3340 curr_sg->Ext = HPSA_SG_LAST; 3341 else 3342 curr_sg->Ext = 0; /* we are not chaining */ 3343 curr_sg++; 3344 } 3345 3346 switch (cmd->sc_data_direction) { 3347 case DMA_TO_DEVICE: 3348 control |= IOACCEL1_CONTROL_DATA_OUT; 3349 break; 3350 case DMA_FROM_DEVICE: 3351 control |= IOACCEL1_CONTROL_DATA_IN; 3352 break; 3353 case DMA_NONE: 3354 control |= IOACCEL1_CONTROL_NODATAXFER; 3355 break; 3356 default: 3357 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 3358 cmd->sc_data_direction); 3359 BUG(); 3360 break; 3361 } 3362 } else { 3363 control |= IOACCEL1_CONTROL_NODATAXFER; 3364 } 3365 3366 c->Header.SGList = use_sg; 3367 /* Fill out the command structure to submit */ 3368 cp->dev_handle = ioaccel_handle & 0xFFFF; 3369 cp->transfer_len = total_len; 3370 cp->io_flags = 
IOACCEL1_IOFLAGS_IO_REQ | 3371 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK); 3372 cp->control = control; 3373 memcpy(cp->CDB, cdb, cdb_len); 3374 memcpy(cp->CISS_LUN, scsi3addr, 8); 3375 /* Tag was already set at init time. */ 3376 enqueue_cmd_and_start_io(h, c); 3377 return 0; 3378 } 3379 3380 /* 3381 * Queue a command directly to a device behind the controller using the 3382 * I/O accelerator path. 3383 */ 3384 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h, 3385 struct CommandList *c) 3386 { 3387 struct scsi_cmnd *cmd = c->scsi_cmd; 3388 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 3389 3390 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, 3391 cmd->cmnd, cmd->cmd_len, dev->scsi3addr); 3392 } 3393 3394 /* 3395 * Set encryption parameters for the ioaccel2 request 3396 */ 3397 static void set_encrypt_ioaccel2(struct ctlr_info *h, 3398 struct CommandList *c, struct io_accel2_cmd *cp) 3399 { 3400 struct scsi_cmnd *cmd = c->scsi_cmd; 3401 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 3402 struct raid_map_data *map = &dev->raid_map; 3403 u64 first_block; 3404 3405 BUG_ON(!(dev->offload_config && dev->offload_enabled)); 3406 3407 /* Are we doing encryption on this device */ 3408 if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON)) 3409 return; 3410 /* Set the data encryption key index. */ 3411 cp->dekindex = map->dekindex; 3412 3413 /* Set the encryption enable flag, encoded into direction field. */ 3414 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK; 3415 3416 /* Set encryption tweak values based on logical block address 3417 * If block size is 512, tweak value is LBA. 3418 * For other block sizes, tweak is (LBA * block size)/ 512) 3419 */ 3420 switch (cmd->cmnd[0]) { 3421 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */ 3422 case WRITE_6: 3423 case READ_6: 3424 if (map->volume_blk_size == 512) { 3425 cp->tweak_lower = 3426 (((u32) cmd->cmnd[2]) << 8) | 3427 cmd->cmnd[3]; 3428 cp->tweak_upper = 0; 3429 } else { 3430 first_block = 3431 (((u64) cmd->cmnd[2]) << 8) | 3432 cmd->cmnd[3]; 3433 first_block = (first_block * map->volume_blk_size)/512; 3434 cp->tweak_lower = (u32)first_block; 3435 cp->tweak_upper = (u32)(first_block >> 32); 3436 } 3437 break; 3438 case WRITE_10: 3439 case READ_10: 3440 if (map->volume_blk_size == 512) { 3441 cp->tweak_lower = 3442 (((u32) cmd->cmnd[2]) << 24) | 3443 (((u32) cmd->cmnd[3]) << 16) | 3444 (((u32) cmd->cmnd[4]) << 8) | 3445 cmd->cmnd[5]; 3446 cp->tweak_upper = 0; 3447 } else { 3448 first_block = 3449 (((u64) cmd->cmnd[2]) << 24) | 3450 (((u64) cmd->cmnd[3]) << 16) | 3451 (((u64) cmd->cmnd[4]) << 8) | 3452 cmd->cmnd[5]; 3453 first_block = (first_block * map->volume_blk_size)/512; 3454 cp->tweak_lower = (u32)first_block; 3455 cp->tweak_upper = (u32)(first_block >> 32); 3456 } 3457 break; 3458 /* Required? 
12-byte cdbs eliminated by fixup_ioaccel_cdb */ 3459 case WRITE_12: 3460 case READ_12: 3461 if (map->volume_blk_size == 512) { 3462 cp->tweak_lower = 3463 (((u32) cmd->cmnd[2]) << 24) | 3464 (((u32) cmd->cmnd[3]) << 16) | 3465 (((u32) cmd->cmnd[4]) << 8) | 3466 cmd->cmnd[5]; 3467 cp->tweak_upper = 0; 3468 } else { 3469 first_block = 3470 (((u64) cmd->cmnd[2]) << 24) | 3471 (((u64) cmd->cmnd[3]) << 16) | 3472 (((u64) cmd->cmnd[4]) << 8) | 3473 cmd->cmnd[5]; 3474 first_block = (first_block * map->volume_blk_size)/512; 3475 cp->tweak_lower = (u32)first_block; 3476 cp->tweak_upper = (u32)(first_block >> 32); 3477 } 3478 break; 3479 case WRITE_16: 3480 case READ_16: 3481 if (map->volume_blk_size == 512) { 3482 cp->tweak_lower = 3483 (((u32) cmd->cmnd[6]) << 24) | 3484 (((u32) cmd->cmnd[7]) << 16) | 3485 (((u32) cmd->cmnd[8]) << 8) | 3486 cmd->cmnd[9]; 3487 cp->tweak_upper = 3488 (((u32) cmd->cmnd[2]) << 24) | 3489 (((u32) cmd->cmnd[3]) << 16) | 3490 (((u32) cmd->cmnd[4]) << 8) | 3491 cmd->cmnd[5]; 3492 } else { 3493 first_block = 3494 (((u64) cmd->cmnd[2]) << 56) | 3495 (((u64) cmd->cmnd[3]) << 48) | 3496 (((u64) cmd->cmnd[4]) << 40) | 3497 (((u64) cmd->cmnd[5]) << 32) | 3498 (((u64) cmd->cmnd[6]) << 24) | 3499 (((u64) cmd->cmnd[7]) << 16) | 3500 (((u64) cmd->cmnd[8]) << 8) | 3501 cmd->cmnd[9]; 3502 first_block = (first_block * map->volume_blk_size)/512; 3503 cp->tweak_lower = (u32)first_block; 3504 cp->tweak_upper = (u32)(first_block >> 32); 3505 } 3506 break; 3507 default: 3508 dev_err(&h->pdev->dev, 3509 "ERROR: %s: IOACCEL request CDB size not supported for encryption\n", 3510 __func__); 3511 BUG(); 3512 break; 3513 } 3514 } 3515 3516 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, 3517 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 3518 u8 *scsi3addr) 3519 { 3520 struct scsi_cmnd *cmd = c->scsi_cmd; 3521 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; 3522 struct ioaccel2_sg_element *curr_sg; 3523 int use_sg, i; 3524 struct scatterlist *sg; 3525 u64 addr64; 3526 u32 len; 3527 u32 total_len = 0; 3528 3529 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) 3530 return IO_ACCEL_INELIGIBLE; 3531 3532 if (fixup_ioaccel_cdb(cdb, &cdb_len)) 3533 return IO_ACCEL_INELIGIBLE; 3534 c->cmd_type = CMD_IOACCEL2; 3535 /* Adjust the DMA address to point to the accelerated command buffer */ 3536 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + 3537 (c->cmdindex * sizeof(*cp)); 3538 BUG_ON(c->busaddr & 0x0000007F); 3539 3540 memset(cp, 0, sizeof(*cp)); 3541 cp->IU_type = IOACCEL2_IU_TYPE; 3542 3543 use_sg = scsi_dma_map(cmd); 3544 if (use_sg < 0) 3545 return use_sg; 3546 3547 if (use_sg) { 3548 BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES); 3549 curr_sg = cp->sg; 3550 scsi_for_each_sg(cmd, sg, use_sg, i) { 3551 addr64 = (u64) sg_dma_address(sg); 3552 len = sg_dma_len(sg); 3553 total_len += len; 3554 curr_sg->address = cpu_to_le64(addr64); 3555 curr_sg->length = cpu_to_le32(len); 3556 curr_sg->reserved[0] = 0; 3557 curr_sg->reserved[1] = 0; 3558 curr_sg->reserved[2] = 0; 3559 curr_sg->chain_indicator = 0; 3560 curr_sg++; 3561 } 3562 3563 switch (cmd->sc_data_direction) { 3564 case DMA_TO_DEVICE: 3565 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 3566 cp->direction |= IOACCEL2_DIR_DATA_OUT; 3567 break; 3568 case DMA_FROM_DEVICE: 3569 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 3570 cp->direction |= IOACCEL2_DIR_DATA_IN; 3571 break; 3572 case DMA_NONE: 3573 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 3574 cp->direction |= IOACCEL2_DIR_NO_DATA; 3575 break; 3576 default: 3577 
dev_err(&h->pdev->dev, "unknown data direction: %d\n", 3578 cmd->sc_data_direction); 3579 BUG(); 3580 break; 3581 } 3582 } else { 3583 cp->direction &= ~IOACCEL2_DIRECTION_MASK; 3584 cp->direction |= IOACCEL2_DIR_NO_DATA; 3585 } 3586 3587 /* Set encryption parameters, if necessary */ 3588 set_encrypt_ioaccel2(h, c, cp); 3589 3590 cp->scsi_nexus = ioaccel_handle; 3591 cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) | 3592 DIRECT_LOOKUP_BIT; 3593 memcpy(cp->cdb, cdb, sizeof(cp->cdb)); 3594 3595 /* fill in sg elements */ 3596 cp->sg_count = (u8) use_sg; 3597 3598 cp->data_len = cpu_to_le32(total_len); 3599 cp->err_ptr = cpu_to_le64(c->busaddr + 3600 offsetof(struct io_accel2_cmd, error_data)); 3601 cp->err_len = cpu_to_le32((u32) sizeof(cp->error_data)); 3602 3603 enqueue_cmd_and_start_io(h, c); 3604 return 0; 3605 } 3606 3607 /* 3608 * Queue a command to the correct I/O accelerator path. 3609 */ 3610 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, 3611 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 3612 u8 *scsi3addr) 3613 { 3614 if (h->transMethod & CFGTBL_Trans_io_accel1) 3615 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle, 3616 cdb, cdb_len, scsi3addr); 3617 else 3618 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle, 3619 cdb, cdb_len, scsi3addr); 3620 } 3621 3622 static void raid_map_helper(struct raid_map_data *map, 3623 int offload_to_mirror, u32 *map_index, u32 *current_group) 3624 { 3625 if (offload_to_mirror == 0) { 3626 /* use physical disk in the first mirrored group. */ 3627 *map_index %= map->data_disks_per_row; 3628 return; 3629 } 3630 do { 3631 /* determine mirror group that *map_index indicates */ 3632 *current_group = *map_index / map->data_disks_per_row; 3633 if (offload_to_mirror == *current_group) 3634 continue; 3635 if (*current_group < (map->layout_map_count - 1)) { 3636 /* select map index from next group */ 3637 *map_index += map->data_disks_per_row; 3638 (*current_group)++; 3639 } else { 3640 /* select map index from first group */ 3641 *map_index %= map->data_disks_per_row; 3642 *current_group = 0; 3643 } 3644 } while (offload_to_mirror != *current_group); 3645 } 3646 3647 /* 3648 * Attempt to perform offload RAID mapping for a logical volume I/O. 
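 * Returns 0 if the request was started on the accelerated path,
 * IO_ACCEL_INELIGIBLE if it should instead go down the normal RAID path,
 * or a negative value if the DMA mapping failed.
 * For example (illustrative numbers only): with 4 data disks and a strip
 * size of 256 blocks, blocks_per_row is 1024, so block 1000 falls in
 * row 0, column 3.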
3649 */ 3650 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, 3651 struct CommandList *c) 3652 { 3653 struct scsi_cmnd *cmd = c->scsi_cmd; 3654 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 3655 struct raid_map_data *map = &dev->raid_map; 3656 struct raid_map_disk_data *dd = &map->data[0]; 3657 int is_write = 0; 3658 u32 map_index; 3659 u64 first_block, last_block; 3660 u32 block_cnt; 3661 u32 blocks_per_row; 3662 u64 first_row, last_row; 3663 u32 first_row_offset, last_row_offset; 3664 u32 first_column, last_column; 3665 u64 r0_first_row, r0_last_row; 3666 u32 r5or6_blocks_per_row; 3667 u64 r5or6_first_row, r5or6_last_row; 3668 u32 r5or6_first_row_offset, r5or6_last_row_offset; 3669 u32 r5or6_first_column, r5or6_last_column; 3670 u32 total_disks_per_row; 3671 u32 stripesize; 3672 u32 first_group, last_group, current_group; 3673 u32 map_row; 3674 u32 disk_handle; 3675 u64 disk_block; 3676 u32 disk_block_cnt; 3677 u8 cdb[16]; 3678 u8 cdb_len; 3679 #if BITS_PER_LONG == 32 3680 u64 tmpdiv; 3681 #endif 3682 int offload_to_mirror; 3683 3684 BUG_ON(!(dev->offload_config && dev->offload_enabled)); 3685 3686 /* check for valid opcode, get LBA and block count */ 3687 switch (cmd->cmnd[0]) { 3688 case WRITE_6: 3689 is_write = 1; 3690 case READ_6: 3691 first_block = 3692 (((u64) cmd->cmnd[2]) << 8) | 3693 cmd->cmnd[3]; 3694 block_cnt = cmd->cmnd[4]; 3695 break; 3696 case WRITE_10: 3697 is_write = 1; 3698 case READ_10: 3699 first_block = 3700 (((u64) cmd->cmnd[2]) << 24) | 3701 (((u64) cmd->cmnd[3]) << 16) | 3702 (((u64) cmd->cmnd[4]) << 8) | 3703 cmd->cmnd[5]; 3704 block_cnt = 3705 (((u32) cmd->cmnd[7]) << 8) | 3706 cmd->cmnd[8]; 3707 break; 3708 case WRITE_12: 3709 is_write = 1; 3710 case READ_12: 3711 first_block = 3712 (((u64) cmd->cmnd[2]) << 24) | 3713 (((u64) cmd->cmnd[3]) << 16) | 3714 (((u64) cmd->cmnd[4]) << 8) | 3715 cmd->cmnd[5]; 3716 block_cnt = 3717 (((u32) cmd->cmnd[6]) << 24) | 3718 (((u32) cmd->cmnd[7]) << 16) | 3719 (((u32) cmd->cmnd[8]) << 8) | 3720 cmd->cmnd[9]; 3721 break; 3722 case WRITE_16: 3723 is_write = 1; 3724 case READ_16: 3725 first_block = 3726 (((u64) cmd->cmnd[2]) << 56) | 3727 (((u64) cmd->cmnd[3]) << 48) | 3728 (((u64) cmd->cmnd[4]) << 40) | 3729 (((u64) cmd->cmnd[5]) << 32) | 3730 (((u64) cmd->cmnd[6]) << 24) | 3731 (((u64) cmd->cmnd[7]) << 16) | 3732 (((u64) cmd->cmnd[8]) << 8) | 3733 cmd->cmnd[9]; 3734 block_cnt = 3735 (((u32) cmd->cmnd[10]) << 24) | 3736 (((u32) cmd->cmnd[11]) << 16) | 3737 (((u32) cmd->cmnd[12]) << 8) | 3738 cmd->cmnd[13]; 3739 break; 3740 default: 3741 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */ 3742 } 3743 BUG_ON(block_cnt == 0); 3744 last_block = first_block + block_cnt - 1; 3745 3746 /* check for write to non-RAID-0 */ 3747 if (is_write && dev->raid_level != 0) 3748 return IO_ACCEL_INELIGIBLE; 3749 3750 /* check for invalid block or wraparound */ 3751 if (last_block >= map->volume_blk_cnt || last_block < first_block) 3752 return IO_ACCEL_INELIGIBLE; 3753 3754 /* calculate stripe information for the request */ 3755 blocks_per_row = map->data_disks_per_row * map->strip_size; 3756 #if BITS_PER_LONG == 32 3757 tmpdiv = first_block; 3758 (void) do_div(tmpdiv, blocks_per_row); 3759 first_row = tmpdiv; 3760 tmpdiv = last_block; 3761 (void) do_div(tmpdiv, blocks_per_row); 3762 last_row = tmpdiv; 3763 first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); 3764 last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); 3765 tmpdiv = first_row_offset; 3766 (void) do_div(tmpdiv, 
map->strip_size);
	first_column = tmpdiv;
	tmpdiv = last_row_offset;
	(void) do_div(tmpdiv, map->strip_size);
	last_column = tmpdiv;
#else
	first_row = first_block / blocks_per_row;
	last_row = last_block / blocks_per_row;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	first_column = first_row_offset / map->strip_size;
	last_column = last_row_offset / map->strip_size;
#endif

	/* if this isn't a single row/column then give to the controller */
	if ((first_row != last_row) || (first_column != last_column))
		return IO_ACCEL_INELIGIBLE;

	/* proceeding with driver mapping */
	total_disks_per_row = map->data_disks_per_row +
				map->metadata_disks_per_row;
	map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
				map->row_cnt;
	map_index = (map_row * total_disks_per_row) + first_column;

	switch (dev->raid_level) {
	case HPSA_RAID_0:
		break; /* nothing special to do */
	case HPSA_RAID_1:
		/* Handles load balance across RAID 1 members.
		 * (2-drive R1 and R10 with even # of drives.)
		 * Appropriate for SSDs, not optimal for HDDs.
		 */
		BUG_ON(map->layout_map_count != 2);
		if (dev->offload_to_mirror)
			map_index += map->data_disks_per_row;
		dev->offload_to_mirror = !dev->offload_to_mirror;
		break;
	case HPSA_RAID_ADM:
		/* Handles N-way mirrors (R1-ADM)
		 * and R10 with # of drives divisible by 3.
		 */
		BUG_ON(map->layout_map_count != 3);

		offload_to_mirror = dev->offload_to_mirror;
		raid_map_helper(map, offload_to_mirror,
				&map_index, &current_group);
		/* set mirror group to use next time */
		offload_to_mirror =
			(offload_to_mirror >= map->layout_map_count - 1)
			? 0 : offload_to_mirror + 1;
		/* FIXME: remove after debug/dev */
		BUG_ON(offload_to_mirror >= map->layout_map_count);
		dev_warn(&h->pdev->dev,
			"DEBUG: Using physical disk map index %d from mirror group %d\n",
			map_index, offload_to_mirror);
		dev->offload_to_mirror = offload_to_mirror;
		/* Avoid direct use of dev->offload_to_mirror within this
		 * function since multiple threads might simultaneously
		 * increment it beyond the range of map->layout_map_count - 1.
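		 * A private copy is therefore read once above and written
		 * back when the new mirror group has been chosen.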
3826 */ 3827 break; 3828 case HPSA_RAID_5: 3829 case HPSA_RAID_6: 3830 if (map->layout_map_count <= 1) 3831 break; 3832 3833 /* Verify first and last block are in same RAID group */ 3834 r5or6_blocks_per_row = 3835 map->strip_size * map->data_disks_per_row; 3836 BUG_ON(r5or6_blocks_per_row == 0); 3837 stripesize = r5or6_blocks_per_row * map->layout_map_count; 3838 #if BITS_PER_LONG == 32 3839 tmpdiv = first_block; 3840 first_group = do_div(tmpdiv, stripesize); 3841 tmpdiv = first_group; 3842 (void) do_div(tmpdiv, r5or6_blocks_per_row); 3843 first_group = tmpdiv; 3844 tmpdiv = last_block; 3845 last_group = do_div(tmpdiv, stripesize); 3846 tmpdiv = last_group; 3847 (void) do_div(tmpdiv, r5or6_blocks_per_row); 3848 last_group = tmpdiv; 3849 #else 3850 first_group = (first_block % stripesize) / r5or6_blocks_per_row; 3851 last_group = (last_block % stripesize) / r5or6_blocks_per_row; 3852 #endif 3853 if (first_group != last_group) 3854 return IO_ACCEL_INELIGIBLE; 3855 3856 /* Verify request is in a single row of RAID 5/6 */ 3857 #if BITS_PER_LONG == 32 3858 tmpdiv = first_block; 3859 (void) do_div(tmpdiv, stripesize); 3860 first_row = r5or6_first_row = r0_first_row = tmpdiv; 3861 tmpdiv = last_block; 3862 (void) do_div(tmpdiv, stripesize); 3863 r5or6_last_row = r0_last_row = tmpdiv; 3864 #else 3865 first_row = r5or6_first_row = r0_first_row = 3866 first_block / stripesize; 3867 r5or6_last_row = r0_last_row = last_block / stripesize; 3868 #endif 3869 if (r5or6_first_row != r5or6_last_row) 3870 return IO_ACCEL_INELIGIBLE; 3871 3872 3873 /* Verify request is in a single column */ 3874 #if BITS_PER_LONG == 32 3875 tmpdiv = first_block; 3876 first_row_offset = do_div(tmpdiv, stripesize); 3877 tmpdiv = first_row_offset; 3878 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row); 3879 r5or6_first_row_offset = first_row_offset; 3880 tmpdiv = last_block; 3881 r5or6_last_row_offset = do_div(tmpdiv, stripesize); 3882 tmpdiv = r5or6_last_row_offset; 3883 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); 3884 tmpdiv = r5or6_first_row_offset; 3885 (void) do_div(tmpdiv, map->strip_size); 3886 first_column = r5or6_first_column = tmpdiv; 3887 tmpdiv = r5or6_last_row_offset; 3888 (void) do_div(tmpdiv, map->strip_size); 3889 r5or6_last_column = tmpdiv; 3890 #else 3891 first_row_offset = r5or6_first_row_offset = 3892 (u32)((first_block % stripesize) % 3893 r5or6_blocks_per_row); 3894 3895 r5or6_last_row_offset = 3896 (u32)((last_block % stripesize) % 3897 r5or6_blocks_per_row); 3898 3899 first_column = r5or6_first_column = 3900 r5or6_first_row_offset / map->strip_size; 3901 r5or6_last_column = 3902 r5or6_last_row_offset / map->strip_size; 3903 #endif 3904 if (r5or6_first_column != r5or6_last_column) 3905 return IO_ACCEL_INELIGIBLE; 3906 3907 /* Request is eligible */ 3908 map_row = ((u32)(first_row >> map->parity_rotation_shift)) % 3909 map->row_cnt; 3910 3911 map_index = (first_group * 3912 (map->row_cnt * total_disks_per_row)) + 3913 (map_row * total_disks_per_row) + first_column; 3914 break; 3915 default: 3916 return IO_ACCEL_INELIGIBLE; 3917 } 3918 3919 disk_handle = dd[map_index].ioaccel_handle; 3920 disk_block = map->disk_starting_blk + (first_row * map->strip_size) + 3921 (first_row_offset - (first_column * map->strip_size)); 3922 disk_block_cnt = block_cnt; 3923 3924 /* handle differing logical/physical block sizes */ 3925 if (map->phys_blk_shift) { 3926 disk_block <<= map->phys_blk_shift; 3927 disk_block_cnt <<= map->phys_blk_shift; 3928 } 3929 BUG_ON(disk_block_cnt > 0xffff); 3930 
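	/* The 10-byte CDB used below carries only a 16-bit transfer
	 * length, hence the 0xffff limit checked above.
	 */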
3931 /* build the new CDB for the physical disk I/O */ 3932 if (disk_block > 0xffffffff) { 3933 cdb[0] = is_write ? WRITE_16 : READ_16; 3934 cdb[1] = 0; 3935 cdb[2] = (u8) (disk_block >> 56); 3936 cdb[3] = (u8) (disk_block >> 48); 3937 cdb[4] = (u8) (disk_block >> 40); 3938 cdb[5] = (u8) (disk_block >> 32); 3939 cdb[6] = (u8) (disk_block >> 24); 3940 cdb[7] = (u8) (disk_block >> 16); 3941 cdb[8] = (u8) (disk_block >> 8); 3942 cdb[9] = (u8) (disk_block); 3943 cdb[10] = (u8) (disk_block_cnt >> 24); 3944 cdb[11] = (u8) (disk_block_cnt >> 16); 3945 cdb[12] = (u8) (disk_block_cnt >> 8); 3946 cdb[13] = (u8) (disk_block_cnt); 3947 cdb[14] = 0; 3948 cdb[15] = 0; 3949 cdb_len = 16; 3950 } else { 3951 cdb[0] = is_write ? WRITE_10 : READ_10; 3952 cdb[1] = 0; 3953 cdb[2] = (u8) (disk_block >> 24); 3954 cdb[3] = (u8) (disk_block >> 16); 3955 cdb[4] = (u8) (disk_block >> 8); 3956 cdb[5] = (u8) (disk_block); 3957 cdb[6] = 0; 3958 cdb[7] = (u8) (disk_block_cnt >> 8); 3959 cdb[8] = (u8) (disk_block_cnt); 3960 cdb[9] = 0; 3961 cdb_len = 10; 3962 } 3963 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len, 3964 dev->scsi3addr); 3965 } 3966 3967 static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, 3968 void (*done)(struct scsi_cmnd *)) 3969 { 3970 struct ctlr_info *h; 3971 struct hpsa_scsi_dev_t *dev; 3972 unsigned char scsi3addr[8]; 3973 struct CommandList *c; 3974 unsigned long flags; 3975 int rc = 0; 3976 3977 /* Get the ptr to our adapter structure out of cmd->host. */ 3978 h = sdev_to_hba(cmd->device); 3979 dev = cmd->device->hostdata; 3980 if (!dev) { 3981 cmd->result = DID_NO_CONNECT << 16; 3982 done(cmd); 3983 return 0; 3984 } 3985 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); 3986 3987 spin_lock_irqsave(&h->lock, flags); 3988 if (unlikely(h->lockup_detected)) { 3989 spin_unlock_irqrestore(&h->lock, flags); 3990 cmd->result = DID_ERROR << 16; 3991 done(cmd); 3992 return 0; 3993 } 3994 spin_unlock_irqrestore(&h->lock, flags); 3995 c = cmd_alloc(h); 3996 if (c == NULL) { /* trouble... */ 3997 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); 3998 return SCSI_MLQUEUE_HOST_BUSY; 3999 } 4000 4001 /* Fill in the command list header */ 4002 4003 cmd->scsi_done = done; /* save this for use by completion code */ 4004 4005 /* save c in case we have to abort it */ 4006 cmd->host_scribble = (unsigned char *) c; 4007 4008 c->cmd_type = CMD_SCSI; 4009 c->scsi_cmd = cmd; 4010 4011 /* Call alternate submit routine for I/O accelerated commands. 4012 * Retries always go down the normal I/O path. 4013 */ 4014 if (likely(cmd->retries == 0 && 4015 cmd->request->cmd_type == REQ_TYPE_FS && 4016 h->acciopath_status)) { 4017 if (dev->offload_enabled) { 4018 rc = hpsa_scsi_ioaccel_raid_map(h, c); 4019 if (rc == 0) 4020 return 0; /* Sent on ioaccel path */ 4021 if (rc < 0) { /* scsi_dma_map failed. */ 4022 cmd_free(h, c); 4023 return SCSI_MLQUEUE_HOST_BUSY; 4024 } 4025 } else if (dev->ioaccel_handle) { 4026 rc = hpsa_scsi_ioaccel_direct_map(h, c); 4027 if (rc == 0) 4028 return 0; /* Sent on direct map path */ 4029 if (rc < 0) { /* scsi_dma_map failed. */ 4030 cmd_free(h, c); 4031 return SCSI_MLQUEUE_HOST_BUSY; 4032 } 4033 } 4034 } 4035 4036 c->Header.ReplyQueue = 0; /* unused in simple mode */ 4037 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); 4038 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT); 4039 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT; 4040 4041 /* Fill in the request block... 
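	 * (CDB contents, simple-queue attribute and transfer direction)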
*/ 4042 4043 c->Request.Timeout = 0; 4044 memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); 4045 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); 4046 c->Request.CDBLen = cmd->cmd_len; 4047 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); 4048 c->Request.Type.Type = TYPE_CMD; 4049 c->Request.Type.Attribute = ATTR_SIMPLE; 4050 switch (cmd->sc_data_direction) { 4051 case DMA_TO_DEVICE: 4052 c->Request.Type.Direction = XFER_WRITE; 4053 break; 4054 case DMA_FROM_DEVICE: 4055 c->Request.Type.Direction = XFER_READ; 4056 break; 4057 case DMA_NONE: 4058 c->Request.Type.Direction = XFER_NONE; 4059 break; 4060 case DMA_BIDIRECTIONAL: 4061 /* This can happen if a buggy application does a scsi passthru 4062 * and sets both inlen and outlen to non-zero. ( see 4063 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) 4064 */ 4065 4066 c->Request.Type.Direction = XFER_RSVD; 4067 /* This is technically wrong, and hpsa controllers should 4068 * reject it with CMD_INVALID, which is the most correct 4069 * response, but non-fibre backends appear to let it 4070 * slide by, and give the same results as if this field 4071 * were set correctly. Either way is acceptable for 4072 * our purposes here. 4073 */ 4074 4075 break; 4076 4077 default: 4078 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 4079 cmd->sc_data_direction); 4080 BUG(); 4081 break; 4082 } 4083 4084 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ 4085 cmd_free(h, c); 4086 return SCSI_MLQUEUE_HOST_BUSY; 4087 } 4088 enqueue_cmd_and_start_io(h, c); 4089 /* the cmd'll come back via intr handler in complete_scsi_command() */ 4090 return 0; 4091 } 4092 4093 static DEF_SCSI_QCMD(hpsa_scsi_queue_command) 4094 4095 static int do_not_scan_if_controller_locked_up(struct ctlr_info *h) 4096 { 4097 unsigned long flags; 4098 4099 /* 4100 * Don't let rescans be initiated on a controller known 4101 * to be locked up. If the controller locks up *during* 4102 * a rescan, that thread is probably hosed, but at least 4103 * we can prevent new rescan threads from piling up on a 4104 * locked up controller. 4105 */ 4106 spin_lock_irqsave(&h->lock, flags); 4107 if (unlikely(h->lockup_detected)) { 4108 spin_unlock_irqrestore(&h->lock, flags); 4109 spin_lock_irqsave(&h->scan_lock, flags); 4110 h->scan_finished = 1; 4111 wake_up_all(&h->scan_wait_queue); 4112 spin_unlock_irqrestore(&h->scan_lock, flags); 4113 return 1; 4114 } 4115 spin_unlock_irqrestore(&h->lock, flags); 4116 return 0; 4117 } 4118 4119 static void hpsa_scan_start(struct Scsi_Host *sh) 4120 { 4121 struct ctlr_info *h = shost_to_hba(sh); 4122 unsigned long flags; 4123 4124 if (do_not_scan_if_controller_locked_up(h)) 4125 return; 4126 4127 /* wait until any scan already in progress is finished. */ 4128 while (1) { 4129 spin_lock_irqsave(&h->scan_lock, flags); 4130 if (h->scan_finished) 4131 break; 4132 spin_unlock_irqrestore(&h->scan_lock, flags); 4133 wait_event(h->scan_wait_queue, h->scan_finished); 4134 /* Note: We don't need to worry about a race between this 4135 * thread and driver unload because the midlayer will 4136 * have incremented the reference count, so unload won't 4137 * happen if we're in here. 4138 */ 4139 } 4140 h->scan_finished = 0; /* mark scan as in progress */ 4141 spin_unlock_irqrestore(&h->scan_lock, flags); 4142 4143 if (do_not_scan_if_controller_locked_up(h)) 4144 return; 4145 4146 hpsa_update_scsi_devices(h, h->scsi_host->host_no); 4147 4148 spin_lock_irqsave(&h->scan_lock, flags); 4149 h->scan_finished = 1; /* mark scan as finished. 
*/ 4150 wake_up_all(&h->scan_wait_queue); 4151 spin_unlock_irqrestore(&h->scan_lock, flags); 4152 } 4153 4154 static int hpsa_scan_finished(struct Scsi_Host *sh, 4155 unsigned long elapsed_time) 4156 { 4157 struct ctlr_info *h = shost_to_hba(sh); 4158 unsigned long flags; 4159 int finished; 4160 4161 spin_lock_irqsave(&h->scan_lock, flags); 4162 finished = h->scan_finished; 4163 spin_unlock_irqrestore(&h->scan_lock, flags); 4164 return finished; 4165 } 4166 4167 static int hpsa_change_queue_depth(struct scsi_device *sdev, 4168 int qdepth, int reason) 4169 { 4170 struct ctlr_info *h = sdev_to_hba(sdev); 4171 4172 if (reason != SCSI_QDEPTH_DEFAULT) 4173 return -ENOTSUPP; 4174 4175 if (qdepth < 1) 4176 qdepth = 1; 4177 else 4178 if (qdepth > h->nr_cmds) 4179 qdepth = h->nr_cmds; 4180 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 4181 return sdev->queue_depth; 4182 } 4183 4184 static void hpsa_unregister_scsi(struct ctlr_info *h) 4185 { 4186 /* we are being forcibly unloaded, and may not refuse. */ 4187 scsi_remove_host(h->scsi_host); 4188 scsi_host_put(h->scsi_host); 4189 h->scsi_host = NULL; 4190 } 4191 4192 static int hpsa_register_scsi(struct ctlr_info *h) 4193 { 4194 struct Scsi_Host *sh; 4195 int error; 4196 4197 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); 4198 if (sh == NULL) 4199 goto fail; 4200 4201 sh->io_port = 0; 4202 sh->n_io_port = 0; 4203 sh->this_id = -1; 4204 sh->max_channel = 3; 4205 sh->max_cmd_len = MAX_COMMAND_SIZE; 4206 sh->max_lun = HPSA_MAX_LUN; 4207 sh->max_id = HPSA_MAX_LUN; 4208 sh->can_queue = h->nr_cmds; 4209 if (h->hba_mode_enabled) 4210 sh->cmd_per_lun = 7; 4211 else 4212 sh->cmd_per_lun = h->nr_cmds; 4213 sh->sg_tablesize = h->maxsgentries; 4214 h->scsi_host = sh; 4215 sh->hostdata[0] = (unsigned long) h; 4216 sh->irq = h->intr[h->intr_mode]; 4217 sh->unique_id = sh->irq; 4218 error = scsi_add_host(sh, &h->pdev->dev); 4219 if (error) 4220 goto fail_host_put; 4221 scsi_scan_host(sh); 4222 return 0; 4223 4224 fail_host_put: 4225 dev_err(&h->pdev->dev, "%s: scsi_add_host" 4226 " failed for controller %d\n", __func__, h->ctlr); 4227 scsi_host_put(sh); 4228 return error; 4229 fail: 4230 dev_err(&h->pdev->dev, "%s: scsi_host_alloc" 4231 " failed for controller %d\n", __func__, h->ctlr); 4232 return -ENOMEM; 4233 } 4234 4235 static int wait_for_device_to_become_ready(struct ctlr_info *h, 4236 unsigned char lunaddr[]) 4237 { 4238 int rc; 4239 int count = 0; 4240 int waittime = 1; /* seconds */ 4241 struct CommandList *c; 4242 4243 c = cmd_special_alloc(h); 4244 if (!c) { 4245 dev_warn(&h->pdev->dev, "out of memory in " 4246 "wait_for_device_to_become_ready.\n"); 4247 return IO_ERROR; 4248 } 4249 4250 /* Send test unit ready until device ready, or give up. */ 4251 while (count < HPSA_TUR_RETRY_LIMIT) { 4252 4253 /* Wait for a bit. do this first, because if we send 4254 * the TUR right away, the reset will just abort it. 4255 */ 4256 msleep(1000 * waittime); 4257 count++; 4258 rc = 0; /* Device ready. */ 4259 4260 /* Increase wait time with each try, up to a point. */ 4261 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) 4262 waittime = waittime * 2; 4263 4264 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ 4265 (void) fill_cmd(c, TEST_UNIT_READY, h, 4266 NULL, 0, 0, lunaddr, TYPE_CMD); 4267 hpsa_scsi_do_simple_cmd_core(h, c); 4268 /* no unmap needed here because no data xfer. 
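	 * (the TEST UNIT READY above was built with no data buffer)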
*/ 4269 4270 if (c->err_info->CommandStatus == CMD_SUCCESS) 4271 break; 4272 4273 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 4274 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && 4275 (c->err_info->SenseInfo[2] == NO_SENSE || 4276 c->err_info->SenseInfo[2] == UNIT_ATTENTION)) 4277 break; 4278 4279 dev_warn(&h->pdev->dev, "waiting %d secs " 4280 "for device to become ready.\n", waittime); 4281 rc = 1; /* device not ready. */ 4282 } 4283 4284 if (rc) 4285 dev_warn(&h->pdev->dev, "giving up on device.\n"); 4286 else 4287 dev_warn(&h->pdev->dev, "device is ready.\n"); 4288 4289 cmd_special_free(h, c); 4290 return rc; 4291 } 4292 4293 /* Need at least one of these error handlers to keep ../scsi/hosts.c from 4294 * complaining. Doing a host- or bus-reset can't do anything good here. 4295 */ 4296 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) 4297 { 4298 int rc; 4299 struct ctlr_info *h; 4300 struct hpsa_scsi_dev_t *dev; 4301 4302 /* find the controller to which the command to be aborted was sent */ 4303 h = sdev_to_hba(scsicmd->device); 4304 if (h == NULL) /* paranoia */ 4305 return FAILED; 4306 dev = scsicmd->device->hostdata; 4307 if (!dev) { 4308 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " 4309 "device lookup failed.\n"); 4310 return FAILED; 4311 } 4312 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n", 4313 h->scsi_host->host_no, dev->bus, dev->target, dev->lun); 4314 /* send a reset to the SCSI LUN which the command was sent to */ 4315 rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN); 4316 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) 4317 return SUCCESS; 4318 4319 dev_warn(&h->pdev->dev, "resetting device failed.\n"); 4320 return FAILED; 4321 } 4322 4323 static void swizzle_abort_tag(u8 *tag) 4324 { 4325 u8 original_tag[8]; 4326 4327 memcpy(original_tag, tag, 8); 4328 tag[0] = original_tag[3]; 4329 tag[1] = original_tag[2]; 4330 tag[2] = original_tag[1]; 4331 tag[3] = original_tag[0]; 4332 tag[4] = original_tag[7]; 4333 tag[5] = original_tag[6]; 4334 tag[6] = original_tag[5]; 4335 tag[7] = original_tag[4]; 4336 } 4337 4338 static void hpsa_get_tag(struct ctlr_info *h, 4339 struct CommandList *c, u32 *taglower, u32 *tagupper) 4340 { 4341 if (c->cmd_type == CMD_IOACCEL1) { 4342 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *) 4343 &h->ioaccel_cmd_pool[c->cmdindex]; 4344 *tagupper = cm1->Tag.upper; 4345 *taglower = cm1->Tag.lower; 4346 return; 4347 } 4348 if (c->cmd_type == CMD_IOACCEL2) { 4349 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *) 4350 &h->ioaccel2_cmd_pool[c->cmdindex]; 4351 /* upper tag not used in ioaccel2 mode */ 4352 memset(tagupper, 0, sizeof(*tagupper)); 4353 *taglower = cm2->Tag; 4354 return; 4355 } 4356 *tagupper = c->Header.Tag.upper; 4357 *taglower = c->Header.Tag.lower; 4358 } 4359 4360 4361 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, 4362 struct CommandList *abort, int swizzle) 4363 { 4364 int rc = IO_OK; 4365 struct CommandList *c; 4366 struct ErrorInfo *ei; 4367 u32 tagupper, taglower; 4368 4369 c = cmd_special_alloc(h); 4370 if (c == NULL) { /* trouble... 
*/ 4371 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 4372 return -ENOMEM; 4373 } 4374 4375 /* fill_cmd can't fail here, no buffer to map */ 4376 (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort, 4377 0, 0, scsi3addr, TYPE_MSG); 4378 if (swizzle) 4379 swizzle_abort_tag(&c->Request.CDB[4]); 4380 hpsa_scsi_do_simple_cmd_core(h, c); 4381 hpsa_get_tag(h, abort, &taglower, &tagupper); 4382 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n", 4383 __func__, tagupper, taglower); 4384 /* no unmap needed here because no data xfer. */ 4385 4386 ei = c->err_info; 4387 switch (ei->CommandStatus) { 4388 case CMD_SUCCESS: 4389 break; 4390 case CMD_UNABORTABLE: /* Very common, don't make noise. */ 4391 rc = -1; 4392 break; 4393 default: 4394 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n", 4395 __func__, tagupper, taglower); 4396 hpsa_scsi_interpret_error(h, c); 4397 rc = -1; 4398 break; 4399 } 4400 cmd_special_free(h, c); 4401 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", 4402 __func__, tagupper, taglower); 4403 return rc; 4404 } 4405 4406 /* 4407 * hpsa_find_cmd_in_queue 4408 * 4409 * Used to determine whether a command (find) is still present 4410 * in queue_head. Optionally excludes the last element of queue_head. 4411 * 4412 * This is used to avoid unnecessary aborts. Commands in h->reqQ have 4413 * not yet been submitted, and so can be aborted by the driver without 4414 * sending an abort to the hardware. 4415 * 4416 * Returns pointer to command if found in queue, NULL otherwise. 4417 */ 4418 static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h, 4419 struct scsi_cmnd *find, struct list_head *queue_head) 4420 { 4421 unsigned long flags; 4422 struct CommandList *c = NULL; /* ptr into cmpQ */ 4423 4424 if (!find) 4425 return 0; 4426 spin_lock_irqsave(&h->lock, flags); 4427 list_for_each_entry(c, queue_head, list) { 4428 if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */ 4429 continue; 4430 if (c->scsi_cmd == find) { 4431 spin_unlock_irqrestore(&h->lock, flags); 4432 return c; 4433 } 4434 } 4435 spin_unlock_irqrestore(&h->lock, flags); 4436 return NULL; 4437 } 4438 4439 static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h, 4440 u8 *tag, struct list_head *queue_head) 4441 { 4442 unsigned long flags; 4443 struct CommandList *c; 4444 4445 spin_lock_irqsave(&h->lock, flags); 4446 list_for_each_entry(c, queue_head, list) { 4447 if (memcmp(&c->Header.Tag, tag, 8) != 0) 4448 continue; 4449 spin_unlock_irqrestore(&h->lock, flags); 4450 return c; 4451 } 4452 spin_unlock_irqrestore(&h->lock, flags); 4453 return NULL; 4454 } 4455 4456 /* ioaccel2 path firmware cannot handle abort task requests. 4457 * Change abort requests to physical target reset, and send to the 4458 * address of the physical disk used for the ioaccel 2 command. 4459 * Return 0 on success (IO_OK) 4460 * -1 on failure 4461 */ 4462 4463 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, 4464 unsigned char *scsi3addr, struct CommandList *abort) 4465 { 4466 int rc = IO_OK; 4467 struct scsi_cmnd *scmd; /* scsi command within request being aborted */ 4468 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */ 4469 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */ 4470 unsigned char *psa = &phys_scsi3addr[0]; 4471 4472 /* Get a pointer to the hpsa logical device. 
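	 * (found through the scsi_cmnd attached to the command being aborted)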
*/ 4473 scmd = (struct scsi_cmnd *) abort->scsi_cmd; 4474 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata); 4475 if (dev == NULL) { 4476 dev_warn(&h->pdev->dev, 4477 "Cannot abort: no device pointer for command.\n"); 4478 return -1; /* not abortable */ 4479 } 4480 4481 if (h->raid_offload_debug > 0) 4482 dev_info(&h->pdev->dev, 4483 "Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4484 h->scsi_host->host_no, dev->bus, dev->target, dev->lun, 4485 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3], 4486 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); 4487 4488 if (!dev->offload_enabled) { 4489 dev_warn(&h->pdev->dev, 4490 "Can't abort: device is not operating in HP SSD Smart Path mode.\n"); 4491 return -1; /* not abortable */ 4492 } 4493 4494 /* Incoming scsi3addr is logical addr. We need physical disk addr. */ 4495 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) { 4496 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n"); 4497 return -1; /* not abortable */ 4498 } 4499 4500 /* send the reset */ 4501 if (h->raid_offload_debug > 0) 4502 dev_info(&h->pdev->dev, 4503 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4504 psa[0], psa[1], psa[2], psa[3], 4505 psa[4], psa[5], psa[6], psa[7]); 4506 rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET); 4507 if (rc != 0) { 4508 dev_warn(&h->pdev->dev, 4509 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4510 psa[0], psa[1], psa[2], psa[3], 4511 psa[4], psa[5], psa[6], psa[7]); 4512 return rc; /* failed to reset */ 4513 } 4514 4515 /* wait for device to recover */ 4516 if (wait_for_device_to_become_ready(h, psa) != 0) { 4517 dev_warn(&h->pdev->dev, 4518 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4519 psa[0], psa[1], psa[2], psa[3], 4520 psa[4], psa[5], psa[6], psa[7]); 4521 return -1; /* failed to recover */ 4522 } 4523 4524 /* device recovered */ 4525 dev_info(&h->pdev->dev, 4526 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 4527 psa[0], psa[1], psa[2], psa[3], 4528 psa[4], psa[5], psa[6], psa[7]); 4529 4530 return rc; /* success */ 4531 } 4532 4533 /* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to 4534 * tell which kind we're dealing with, so we send the abort both ways. There 4535 * shouldn't be any collisions between swizzled and unswizzled tags due to the 4536 * way we construct our tags but we check anyway in case the assumptions which 4537 * make this true someday become false. 4538 */ 4539 static int hpsa_send_abort_both_ways(struct ctlr_info *h, 4540 unsigned char *scsi3addr, struct CommandList *abort) 4541 { 4542 u8 swizzled_tag[8]; 4543 struct CommandList *c; 4544 int rc = 0, rc2 = 0; 4545 4546 /* ioccelerator mode 2 commands should be aborted via the 4547 * accelerated path, since RAID path is unaware of these commands, 4548 * but underlying firmware can't handle abort TMF. 4549 * Change abort to physical device reset. 4550 */ 4551 if (abort->cmd_type == CMD_IOACCEL2) 4552 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort); 4553 4554 /* we do not expect to find the swizzled tag in our queue, but 4555 * check anyway just to be sure the assumptions which make this 4556 * the case haven't become wrong. 
4557 */ 4558 memcpy(swizzled_tag, &abort->Request.CDB[4], 8); 4559 swizzle_abort_tag(swizzled_tag); 4560 c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ); 4561 if (c != NULL) { 4562 dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n"); 4563 return hpsa_send_abort(h, scsi3addr, abort, 0); 4564 } 4565 rc = hpsa_send_abort(h, scsi3addr, abort, 0); 4566 4567 /* if the command is still in our queue, we can't conclude that it was 4568 * aborted (it might have just completed normally) but in any case 4569 * we don't need to try to abort it another way. 4570 */ 4571 c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ); 4572 if (c) 4573 rc2 = hpsa_send_abort(h, scsi3addr, abort, 1); 4574 return rc && rc2; 4575 } 4576 4577 /* Send an abort for the specified command. 4578 * If the device and controller support it, 4579 * send a task abort request. 4580 */ 4581 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) 4582 { 4583 4584 int i, rc; 4585 struct ctlr_info *h; 4586 struct hpsa_scsi_dev_t *dev; 4587 struct CommandList *abort; /* pointer to command to be aborted */ 4588 struct CommandList *found; 4589 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */ 4590 char msg[256]; /* For debug messaging. */ 4591 int ml = 0; 4592 u32 tagupper, taglower; 4593 4594 /* Find the controller of the command to be aborted */ 4595 h = sdev_to_hba(sc->device); 4596 if (WARN(h == NULL, 4597 "ABORT REQUEST FAILED, Controller lookup failed.\n")) 4598 return FAILED; 4599 4600 /* Check that controller supports some kind of task abort */ 4601 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) && 4602 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) 4603 return FAILED; 4604 4605 memset(msg, 0, sizeof(msg)); 4606 ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%d ", 4607 h->scsi_host->host_no, sc->device->channel, 4608 sc->device->id, sc->device->lun); 4609 4610 /* Find the device of the command to be aborted */ 4611 dev = sc->device->hostdata; 4612 if (!dev) { 4613 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n", 4614 msg); 4615 return FAILED; 4616 } 4617 4618 /* Get SCSI command to be aborted */ 4619 abort = (struct CommandList *) sc->host_scribble; 4620 if (abort == NULL) { 4621 dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n", 4622 msg); 4623 return FAILED; 4624 } 4625 hpsa_get_tag(h, abort, &taglower, &tagupper); 4626 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower); 4627 as = (struct scsi_cmnd *) abort->scsi_cmd; 4628 if (as != NULL) 4629 ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ", 4630 as->cmnd[0], as->serial_number); 4631 dev_dbg(&h->pdev->dev, "%s\n", msg); 4632 dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n", 4633 h->scsi_host->host_no, dev->bus, dev->target, dev->lun); 4634 4635 /* Search reqQ to See if command is queued but not submitted, 4636 * if so, complete the command with aborted status and remove 4637 * it from the reqQ. 
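	 * A command still sitting in reqQ never reached the hardware,
	 * so no abort message needs to be sent for it.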
4638 */ 4639 found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ); 4640 if (found) { 4641 found->err_info->CommandStatus = CMD_ABORTED; 4642 finish_cmd(found); 4643 dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n", 4644 msg); 4645 return SUCCESS; 4646 } 4647 4648 /* not in reqQ, if also not in cmpQ, must have already completed */ 4649 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ); 4650 if (!found) { 4651 dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n", 4652 msg); 4653 return SUCCESS; 4654 } 4655 4656 /* 4657 * Command is in flight, or possibly already completed 4658 * by the firmware (but not to the scsi mid layer) but we can't 4659 * distinguish which. Send the abort down. 4660 */ 4661 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort); 4662 if (rc != 0) { 4663 dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg); 4664 dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n", 4665 h->scsi_host->host_no, 4666 dev->bus, dev->target, dev->lun); 4667 return FAILED; 4668 } 4669 dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg); 4670 4671 /* If the abort(s) above completed and actually aborted the 4672 * command, then the command to be aborted should already be 4673 * completed. If not, wait around a bit more to see if they 4674 * manage to complete normally. 4675 */ 4676 #define ABORT_COMPLETE_WAIT_SECS 30 4677 for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) { 4678 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ); 4679 if (!found) 4680 return SUCCESS; 4681 msleep(100); 4682 } 4683 dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n", 4684 msg, ABORT_COMPLETE_WAIT_SECS); 4685 return FAILED; 4686 } 4687 4688 4689 /* 4690 * For operations that cannot sleep, a command block is allocated at init, 4691 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track 4692 * which ones are free or in use. Lock must be held when calling this. 4693 * cmd_free() is the complement. 4694 */ 4695 static struct CommandList *cmd_alloc(struct ctlr_info *h) 4696 { 4697 struct CommandList *c; 4698 int i; 4699 union u64bit temp64; 4700 dma_addr_t cmd_dma_handle, err_dma_handle; 4701 unsigned long flags; 4702 4703 spin_lock_irqsave(&h->lock, flags); 4704 do { 4705 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); 4706 if (i == h->nr_cmds) { 4707 spin_unlock_irqrestore(&h->lock, flags); 4708 return NULL; 4709 } 4710 } while (test_and_set_bit 4711 (i & (BITS_PER_LONG - 1), 4712 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); 4713 spin_unlock_irqrestore(&h->lock, flags); 4714 4715 c = h->cmd_pool + i; 4716 memset(c, 0, sizeof(*c)); 4717 cmd_dma_handle = h->cmd_pool_dhandle 4718 + i * sizeof(*c); 4719 c->err_info = h->errinfo_pool + i; 4720 memset(c->err_info, 0, sizeof(*c->err_info)); 4721 err_dma_handle = h->errinfo_pool_dhandle 4722 + i * sizeof(*c->err_info); 4723 4724 c->cmdindex = i; 4725 4726 INIT_LIST_HEAD(&c->list); 4727 c->busaddr = (u32) cmd_dma_handle; 4728 temp64.val = (u64) err_dma_handle; 4729 c->ErrDesc.Addr.lower = temp64.val32.lower; 4730 c->ErrDesc.Addr.upper = temp64.val32.upper; 4731 c->ErrDesc.Len = sizeof(*c->err_info); 4732 4733 c->h = h; 4734 return c; 4735 } 4736 4737 /* For operations that can wait for kmalloc to possibly sleep, 4738 * this routine can be called. Lock need not be held to call 4739 * cmd_special_alloc. cmd_special_free() is the complement. 
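 * The command and its error info buffer are allocated with
 * pci_alloc_consistent rather than taken from the pre-allocated pool.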
4740 */ 4741 static struct CommandList *cmd_special_alloc(struct ctlr_info *h) 4742 { 4743 struct CommandList *c; 4744 union u64bit temp64; 4745 dma_addr_t cmd_dma_handle, err_dma_handle; 4746 4747 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle); 4748 if (c == NULL) 4749 return NULL; 4750 memset(c, 0, sizeof(*c)); 4751 4752 c->cmd_type = CMD_SCSI; 4753 c->cmdindex = -1; 4754 4755 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info), 4756 &err_dma_handle); 4757 4758 if (c->err_info == NULL) { 4759 pci_free_consistent(h->pdev, 4760 sizeof(*c), c, cmd_dma_handle); 4761 return NULL; 4762 } 4763 memset(c->err_info, 0, sizeof(*c->err_info)); 4764 4765 INIT_LIST_HEAD(&c->list); 4766 c->busaddr = (u32) cmd_dma_handle; 4767 temp64.val = (u64) err_dma_handle; 4768 c->ErrDesc.Addr.lower = temp64.val32.lower; 4769 c->ErrDesc.Addr.upper = temp64.val32.upper; 4770 c->ErrDesc.Len = sizeof(*c->err_info); 4771 4772 c->h = h; 4773 return c; 4774 } 4775 4776 static void cmd_free(struct ctlr_info *h, struct CommandList *c) 4777 { 4778 int i; 4779 unsigned long flags; 4780 4781 i = c - h->cmd_pool; 4782 spin_lock_irqsave(&h->lock, flags); 4783 clear_bit(i & (BITS_PER_LONG - 1), 4784 h->cmd_pool_bits + (i / BITS_PER_LONG)); 4785 spin_unlock_irqrestore(&h->lock, flags); 4786 } 4787 4788 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c) 4789 { 4790 union u64bit temp64; 4791 4792 temp64.val32.lower = c->ErrDesc.Addr.lower; 4793 temp64.val32.upper = c->ErrDesc.Addr.upper; 4794 pci_free_consistent(h->pdev, sizeof(*c->err_info), 4795 c->err_info, (dma_addr_t) temp64.val); 4796 pci_free_consistent(h->pdev, sizeof(*c), 4797 c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK)); 4798 } 4799 4800 #ifdef CONFIG_COMPAT 4801 4802 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) 4803 { 4804 IOCTL32_Command_struct __user *arg32 = 4805 (IOCTL32_Command_struct __user *) arg; 4806 IOCTL_Command_struct arg64; 4807 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); 4808 int err; 4809 u32 cp; 4810 4811 memset(&arg64, 0, sizeof(arg64)); 4812 err = 0; 4813 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 4814 sizeof(arg64.LUN_info)); 4815 err |= copy_from_user(&arg64.Request, &arg32->Request, 4816 sizeof(arg64.Request)); 4817 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 4818 sizeof(arg64.error_info)); 4819 err |= get_user(arg64.buf_size, &arg32->buf_size); 4820 err |= get_user(cp, &arg32->buf); 4821 arg64.buf = compat_ptr(cp); 4822 err |= copy_to_user(p, &arg64, sizeof(arg64)); 4823 4824 if (err) 4825 return -EFAULT; 4826 4827 err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p); 4828 if (err) 4829 return err; 4830 err |= copy_in_user(&arg32->error_info, &p->error_info, 4831 sizeof(arg32->error_info)); 4832 if (err) 4833 return -EFAULT; 4834 return err; 4835 } 4836 4837 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, 4838 int cmd, void *arg) 4839 { 4840 BIG_IOCTL32_Command_struct __user *arg32 = 4841 (BIG_IOCTL32_Command_struct __user *) arg; 4842 BIG_IOCTL_Command_struct arg64; 4843 BIG_IOCTL_Command_struct __user *p = 4844 compat_alloc_user_space(sizeof(arg64)); 4845 int err; 4846 u32 cp; 4847 4848 memset(&arg64, 0, sizeof(arg64)); 4849 err = 0; 4850 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 4851 sizeof(arg64.LUN_info)); 4852 err |= copy_from_user(&arg64.Request, &arg32->Request, 4853 sizeof(arg64.Request)); 4854 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 4855 
sizeof(arg64.error_info)); 4856 err |= get_user(arg64.buf_size, &arg32->buf_size); 4857 err |= get_user(arg64.malloc_size, &arg32->malloc_size); 4858 err |= get_user(cp, &arg32->buf); 4859 arg64.buf = compat_ptr(cp); 4860 err |= copy_to_user(p, &arg64, sizeof(arg64)); 4861 4862 if (err) 4863 return -EFAULT; 4864 4865 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p); 4866 if (err) 4867 return err; 4868 err |= copy_in_user(&arg32->error_info, &p->error_info, 4869 sizeof(arg32->error_info)); 4870 if (err) 4871 return -EFAULT; 4872 return err; 4873 } 4874 4875 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg) 4876 { 4877 switch (cmd) { 4878 case CCISS_GETPCIINFO: 4879 case CCISS_GETINTINFO: 4880 case CCISS_SETINTINFO: 4881 case CCISS_GETNODENAME: 4882 case CCISS_SETNODENAME: 4883 case CCISS_GETHEARTBEAT: 4884 case CCISS_GETBUSTYPES: 4885 case CCISS_GETFIRMVER: 4886 case CCISS_GETDRIVVER: 4887 case CCISS_REVALIDVOLS: 4888 case CCISS_DEREGDISK: 4889 case CCISS_REGNEWDISK: 4890 case CCISS_REGNEWD: 4891 case CCISS_RESCANDISK: 4892 case CCISS_GETLUNINFO: 4893 return hpsa_ioctl(dev, cmd, arg); 4894 4895 case CCISS_PASSTHRU32: 4896 return hpsa_ioctl32_passthru(dev, cmd, arg); 4897 case CCISS_BIG_PASSTHRU32: 4898 return hpsa_ioctl32_big_passthru(dev, cmd, arg); 4899 4900 default: 4901 return -ENOIOCTLCMD; 4902 } 4903 } 4904 #endif 4905 4906 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) 4907 { 4908 struct hpsa_pci_info pciinfo; 4909 4910 if (!argp) 4911 return -EINVAL; 4912 pciinfo.domain = pci_domain_nr(h->pdev->bus); 4913 pciinfo.bus = h->pdev->bus->number; 4914 pciinfo.dev_fn = h->pdev->devfn; 4915 pciinfo.board_id = h->board_id; 4916 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo))) 4917 return -EFAULT; 4918 return 0; 4919 } 4920 4921 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) 4922 { 4923 DriverVer_type DriverVer; 4924 unsigned char vmaj, vmin, vsubmin; 4925 int rc; 4926 4927 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu", 4928 &vmaj, &vmin, &vsubmin); 4929 if (rc != 3) { 4930 dev_info(&h->pdev->dev, "driver version string '%s' " 4931 "unrecognized.", HPSA_DRIVER_VERSION); 4932 vmaj = 0; 4933 vmin = 0; 4934 vsubmin = 0; 4935 } 4936 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin; 4937 if (!argp) 4938 return -EINVAL; 4939 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type))) 4940 return -EFAULT; 4941 return 0; 4942 } 4943 4944 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) 4945 { 4946 IOCTL_Command_struct iocommand; 4947 struct CommandList *c; 4948 char *buff = NULL; 4949 union u64bit temp64; 4950 int rc = 0; 4951 4952 if (!argp) 4953 return -EINVAL; 4954 if (!capable(CAP_SYS_RAWIO)) 4955 return -EPERM; 4956 if (copy_from_user(&iocommand, argp, sizeof(iocommand))) 4957 return -EFAULT; 4958 if ((iocommand.buf_size < 1) && 4959 (iocommand.Request.Type.Direction != XFER_NONE)) { 4960 return -EINVAL; 4961 } 4962 if (iocommand.buf_size > 0) { 4963 buff = kmalloc(iocommand.buf_size, GFP_KERNEL); 4964 if (buff == NULL) 4965 return -EFAULT; 4966 if (iocommand.Request.Type.Direction & XFER_WRITE) { 4967 /* Copy the data into the buffer we created */ 4968 if (copy_from_user(buff, iocommand.buf, 4969 iocommand.buf_size)) { 4970 rc = -EFAULT; 4971 goto out_kfree; 4972 } 4973 } else { 4974 memset(buff, 0, iocommand.buf_size); 4975 } 4976 } 4977 c = cmd_special_alloc(h); 4978 if (c == NULL) { 4979 rc = -ENOMEM; 4980 goto out_kfree; 4981 } 4982 /* Fill in the command type */ 4983 c->cmd_type = 
CMD_IOCTL_PEND; 4984 /* Fill in Command Header */ 4985 c->Header.ReplyQueue = 0; /* unused in simple mode */ 4986 if (iocommand.buf_size > 0) { /* buffer to fill */ 4987 c->Header.SGList = 1; 4988 c->Header.SGTotal = 1; 4989 } else { /* no buffers to fill */ 4990 c->Header.SGList = 0; 4991 c->Header.SGTotal = 0; 4992 } 4993 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); 4994 /* use the kernel address the cmd block for tag */ 4995 c->Header.Tag.lower = c->busaddr; 4996 4997 /* Fill in Request block */ 4998 memcpy(&c->Request, &iocommand.Request, 4999 sizeof(c->Request)); 5000 5001 /* Fill in the scatter gather information */ 5002 if (iocommand.buf_size > 0) { 5003 temp64.val = pci_map_single(h->pdev, buff, 5004 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); 5005 if (dma_mapping_error(&h->pdev->dev, temp64.val)) { 5006 c->SG[0].Addr.lower = 0; 5007 c->SG[0].Addr.upper = 0; 5008 c->SG[0].Len = 0; 5009 rc = -ENOMEM; 5010 goto out; 5011 } 5012 c->SG[0].Addr.lower = temp64.val32.lower; 5013 c->SG[0].Addr.upper = temp64.val32.upper; 5014 c->SG[0].Len = iocommand.buf_size; 5015 c->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining*/ 5016 } 5017 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); 5018 if (iocommand.buf_size > 0) 5019 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); 5020 check_ioctl_unit_attention(h, c); 5021 5022 /* Copy the error information out */ 5023 memcpy(&iocommand.error_info, c->err_info, 5024 sizeof(iocommand.error_info)); 5025 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) { 5026 rc = -EFAULT; 5027 goto out; 5028 } 5029 if ((iocommand.Request.Type.Direction & XFER_READ) && 5030 iocommand.buf_size > 0) { 5031 /* Copy the data out of the buffer we created */ 5032 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { 5033 rc = -EFAULT; 5034 goto out; 5035 } 5036 } 5037 out: 5038 cmd_special_free(h, c); 5039 out_kfree: 5040 kfree(buff); 5041 return rc; 5042 } 5043 5044 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) 5045 { 5046 BIG_IOCTL_Command_struct *ioc; 5047 struct CommandList *c; 5048 unsigned char **buff = NULL; 5049 int *buff_size = NULL; 5050 union u64bit temp64; 5051 BYTE sg_used = 0; 5052 int status = 0; 5053 int i; 5054 u32 left; 5055 u32 sz; 5056 BYTE __user *data_ptr; 5057 5058 if (!argp) 5059 return -EINVAL; 5060 if (!capable(CAP_SYS_RAWIO)) 5061 return -EPERM; 5062 ioc = (BIG_IOCTL_Command_struct *) 5063 kmalloc(sizeof(*ioc), GFP_KERNEL); 5064 if (!ioc) { 5065 status = -ENOMEM; 5066 goto cleanup1; 5067 } 5068 if (copy_from_user(ioc, argp, sizeof(*ioc))) { 5069 status = -EFAULT; 5070 goto cleanup1; 5071 } 5072 if ((ioc->buf_size < 1) && 5073 (ioc->Request.Type.Direction != XFER_NONE)) { 5074 status = -EINVAL; 5075 goto cleanup1; 5076 } 5077 /* Check kmalloc limits using all SGs */ 5078 if (ioc->malloc_size > MAX_KMALLOC_SIZE) { 5079 status = -EINVAL; 5080 goto cleanup1; 5081 } 5082 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) { 5083 status = -EINVAL; 5084 goto cleanup1; 5085 } 5086 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL); 5087 if (!buff) { 5088 status = -ENOMEM; 5089 goto cleanup1; 5090 } 5091 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL); 5092 if (!buff_size) { 5093 status = -ENOMEM; 5094 goto cleanup1; 5095 } 5096 left = ioc->buf_size; 5097 data_ptr = ioc->buf; 5098 while (left) { 5099 sz = (left > ioc->malloc_size) ? 
ioc->malloc_size : left; 5100 buff_size[sg_used] = sz; 5101 buff[sg_used] = kmalloc(sz, GFP_KERNEL); 5102 if (buff[sg_used] == NULL) { 5103 status = -ENOMEM; 5104 goto cleanup1; 5105 } 5106 if (ioc->Request.Type.Direction & XFER_WRITE) { 5107 if (copy_from_user(buff[sg_used], data_ptr, sz)) { 5108 status = -ENOMEM; 5109 goto cleanup1; 5110 } 5111 } else 5112 memset(buff[sg_used], 0, sz); 5113 left -= sz; 5114 data_ptr += sz; 5115 sg_used++; 5116 } 5117 c = cmd_special_alloc(h); 5118 if (c == NULL) { 5119 status = -ENOMEM; 5120 goto cleanup1; 5121 } 5122 c->cmd_type = CMD_IOCTL_PEND; 5123 c->Header.ReplyQueue = 0; 5124 c->Header.SGList = c->Header.SGTotal = sg_used; 5125 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); 5126 c->Header.Tag.lower = c->busaddr; 5127 memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); 5128 if (ioc->buf_size > 0) { 5129 int i; 5130 for (i = 0; i < sg_used; i++) { 5131 temp64.val = pci_map_single(h->pdev, buff[i], 5132 buff_size[i], PCI_DMA_BIDIRECTIONAL); 5133 if (dma_mapping_error(&h->pdev->dev, temp64.val)) { 5134 c->SG[i].Addr.lower = 0; 5135 c->SG[i].Addr.upper = 0; 5136 c->SG[i].Len = 0; 5137 hpsa_pci_unmap(h->pdev, c, i, 5138 PCI_DMA_BIDIRECTIONAL); 5139 status = -ENOMEM; 5140 goto cleanup0; 5141 } 5142 c->SG[i].Addr.lower = temp64.val32.lower; 5143 c->SG[i].Addr.upper = temp64.val32.upper; 5144 c->SG[i].Len = buff_size[i]; 5145 c->SG[i].Ext = i < sg_used - 1 ? 0 : HPSA_SG_LAST; 5146 } 5147 } 5148 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); 5149 if (sg_used) 5150 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); 5151 check_ioctl_unit_attention(h, c); 5152 /* Copy the error information out */ 5153 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); 5154 if (copy_to_user(argp, ioc, sizeof(*ioc))) { 5155 status = -EFAULT; 5156 goto cleanup0; 5157 } 5158 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) { 5159 /* Copy the data out of the buffer we created */ 5160 BYTE __user *ptr = ioc->buf; 5161 for (i = 0; i < sg_used; i++) { 5162 if (copy_to_user(ptr, buff[i], buff_size[i])) { 5163 status = -EFAULT; 5164 goto cleanup0; 5165 } 5166 ptr += buff_size[i]; 5167 } 5168 } 5169 status = 0; 5170 cleanup0: 5171 cmd_special_free(h, c); 5172 cleanup1: 5173 if (buff) { 5174 for (i = 0; i < sg_used; i++) 5175 kfree(buff[i]); 5176 kfree(buff); 5177 } 5178 kfree(buff_size); 5179 kfree(ioc); 5180 return status; 5181 } 5182 5183 static void check_ioctl_unit_attention(struct ctlr_info *h, 5184 struct CommandList *c) 5185 { 5186 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 5187 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) 5188 (void) check_for_unit_attention(h, c); 5189 } 5190 5191 static int increment_passthru_count(struct ctlr_info *h) 5192 { 5193 unsigned long flags; 5194 5195 spin_lock_irqsave(&h->passthru_count_lock, flags); 5196 if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) { 5197 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 5198 return -1; 5199 } 5200 h->passthru_count++; 5201 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 5202 return 0; 5203 } 5204 5205 static void decrement_passthru_count(struct ctlr_info *h) 5206 { 5207 unsigned long flags; 5208 5209 spin_lock_irqsave(&h->passthru_count_lock, flags); 5210 if (h->passthru_count <= 0) { 5211 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 5212 /* not expecting to get here. 
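	 * a decrement without a matching increment indicates a driver bug.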
*/ 5213 dev_warn(&h->pdev->dev, "Bug detected, passthru_count seems to be incorrect.\n"); 5214 return; 5215 } 5216 h->passthru_count--; 5217 spin_unlock_irqrestore(&h->passthru_count_lock, flags); 5218 } 5219 5220 /* 5221 * ioctl 5222 */ 5223 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg) 5224 { 5225 struct ctlr_info *h; 5226 void __user *argp = (void __user *)arg; 5227 int rc; 5228 5229 h = sdev_to_hba(dev); 5230 5231 switch (cmd) { 5232 case CCISS_DEREGDISK: 5233 case CCISS_REGNEWDISK: 5234 case CCISS_REGNEWD: 5235 hpsa_scan_start(h->scsi_host); 5236 return 0; 5237 case CCISS_GETPCIINFO: 5238 return hpsa_getpciinfo_ioctl(h, argp); 5239 case CCISS_GETDRIVVER: 5240 return hpsa_getdrivver_ioctl(h, argp); 5241 case CCISS_PASSTHRU: 5242 if (increment_passthru_count(h)) 5243 return -EAGAIN; 5244 rc = hpsa_passthru_ioctl(h, argp); 5245 decrement_passthru_count(h); 5246 return rc; 5247 case CCISS_BIG_PASSTHRU: 5248 if (increment_passthru_count(h)) 5249 return -EAGAIN; 5250 rc = hpsa_big_passthru_ioctl(h, argp); 5251 decrement_passthru_count(h); 5252 return rc; 5253 default: 5254 return -ENOTTY; 5255 } 5256 } 5257 5258 static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, 5259 u8 reset_type) 5260 { 5261 struct CommandList *c; 5262 5263 c = cmd_alloc(h); 5264 if (!c) 5265 return -ENOMEM; 5266 /* fill_cmd can't fail here, no data buffer to map */ 5267 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, 5268 RAID_CTLR_LUNID, TYPE_MSG); 5269 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */ 5270 c->waiting = NULL; 5271 enqueue_cmd_and_start_io(h, c); 5272 /* Don't wait for completion, the reset won't complete. Don't free 5273 * the command either. This is the last command we will send before 5274 * re-initializing everything, so it doesn't matter and won't leak. 5275 */ 5276 return 0; 5277 } 5278 5279 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, 5280 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, 5281 int cmd_type) 5282 { 5283 int pci_dir = XFER_NONE; 5284 struct CommandList *a; /* for commands to be aborted */ 5285 5286 c->cmd_type = CMD_IOCTL_PEND; 5287 c->Header.ReplyQueue = 0; 5288 if (buff != NULL && size > 0) { 5289 c->Header.SGList = 1; 5290 c->Header.SGTotal = 1; 5291 } else { 5292 c->Header.SGList = 0; 5293 c->Header.SGTotal = 0; 5294 } 5295 c->Header.Tag.lower = c->busaddr; 5296 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); 5297 5298 c->Request.Type.Type = cmd_type; 5299 if (cmd_type == TYPE_CMD) { 5300 switch (cmd) { 5301 case HPSA_INQUIRY: 5302 /* are we trying to read a vital product page */ 5303 if (page_code & VPD_PAGE) { 5304 c->Request.CDB[1] = 0x01; 5305 c->Request.CDB[2] = (page_code & 0xff); 5306 } 5307 c->Request.CDBLen = 6; 5308 c->Request.Type.Attribute = ATTR_SIMPLE; 5309 c->Request.Type.Direction = XFER_READ; 5310 c->Request.Timeout = 0; 5311 c->Request.CDB[0] = HPSA_INQUIRY; 5312 c->Request.CDB[4] = size & 0xFF; 5313 break; 5314 case HPSA_REPORT_LOG: 5315 case HPSA_REPORT_PHYS: 5316 /* Talking to controller so It's a physical command 5317 mode = 00 target = 0. Nothing to write. 
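			   The 32-bit allocation length goes in CDB bytes 6-9 below.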
5318 */ 5319 c->Request.CDBLen = 12; 5320 c->Request.Type.Attribute = ATTR_SIMPLE; 5321 c->Request.Type.Direction = XFER_READ; 5322 c->Request.Timeout = 0; 5323 c->Request.CDB[0] = cmd; 5324 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ 5325 c->Request.CDB[7] = (size >> 16) & 0xFF; 5326 c->Request.CDB[8] = (size >> 8) & 0xFF; 5327 c->Request.CDB[9] = size & 0xFF; 5328 break; 5329 case HPSA_CACHE_FLUSH: 5330 c->Request.CDBLen = 12; 5331 c->Request.Type.Attribute = ATTR_SIMPLE; 5332 c->Request.Type.Direction = XFER_WRITE; 5333 c->Request.Timeout = 0; 5334 c->Request.CDB[0] = BMIC_WRITE; 5335 c->Request.CDB[6] = BMIC_CACHE_FLUSH; 5336 c->Request.CDB[7] = (size >> 8) & 0xFF; 5337 c->Request.CDB[8] = size & 0xFF; 5338 break; 5339 case TEST_UNIT_READY: 5340 c->Request.CDBLen = 6; 5341 c->Request.Type.Attribute = ATTR_SIMPLE; 5342 c->Request.Type.Direction = XFER_NONE; 5343 c->Request.Timeout = 0; 5344 break; 5345 case HPSA_GET_RAID_MAP: 5346 c->Request.CDBLen = 12; 5347 c->Request.Type.Attribute = ATTR_SIMPLE; 5348 c->Request.Type.Direction = XFER_READ; 5349 c->Request.Timeout = 0; 5350 c->Request.CDB[0] = HPSA_CISS_READ; 5351 c->Request.CDB[1] = cmd; 5352 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ 5353 c->Request.CDB[7] = (size >> 16) & 0xFF; 5354 c->Request.CDB[8] = (size >> 8) & 0xFF; 5355 c->Request.CDB[9] = size & 0xFF; 5356 break; 5357 case BMIC_SENSE_CONTROLLER_PARAMETERS: 5358 c->Request.CDBLen = 10; 5359 c->Request.Type.Attribute = ATTR_SIMPLE; 5360 c->Request.Type.Direction = XFER_READ; 5361 c->Request.Timeout = 0; 5362 c->Request.CDB[0] = BMIC_READ; 5363 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS; 5364 c->Request.CDB[7] = (size >> 16) & 0xFF; 5365 c->Request.CDB[8] = (size >> 8) & 0xFF; 5366 break; 5367 default: 5368 dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd); 5369 BUG(); 5370 return -1; 5371 } 5372 } else if (cmd_type == TYPE_MSG) { 5373 switch (cmd) { 5374 5375 case HPSA_DEVICE_RESET_MSG: 5376 c->Request.CDBLen = 16; 5377 c->Request.Type.Type = 1; /* It is a MSG not a CMD */ 5378 c->Request.Type.Attribute = ATTR_SIMPLE; 5379 c->Request.Type.Direction = XFER_NONE; 5380 c->Request.Timeout = 0; /* Don't time out */ 5381 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); 5382 c->Request.CDB[0] = cmd; 5383 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN; 5384 /* If bytes 4-7 are zero, it means reset the */ 5385 /* LunID device */ 5386 c->Request.CDB[4] = 0x00; 5387 c->Request.CDB[5] = 0x00; 5388 c->Request.CDB[6] = 0x00; 5389 c->Request.CDB[7] = 0x00; 5390 break; 5391 case HPSA_ABORT_MSG: 5392 a = buff; /* point to command to be aborted */ 5393 dev_dbg(&h->pdev->dev, "Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n", 5394 a->Header.Tag.upper, a->Header.Tag.lower, 5395 c->Header.Tag.upper, c->Header.Tag.lower); 5396 c->Request.CDBLen = 16; 5397 c->Request.Type.Type = TYPE_MSG; 5398 c->Request.Type.Attribute = ATTR_SIMPLE; 5399 c->Request.Type.Direction = XFER_WRITE; 5400 c->Request.Timeout = 0; /* Don't time out */ 5401 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT; 5402 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK; 5403 c->Request.CDB[2] = 0x00; /* reserved */ 5404 c->Request.CDB[3] = 0x00; /* reserved */ 5405 /* Tag to abort goes in CDB[4]-CDB[11] */ 5406 c->Request.CDB[4] = a->Header.Tag.lower & 0xFF; 5407 c->Request.CDB[5] = (a->Header.Tag.lower >> 8) & 0xFF; 5408 c->Request.CDB[6] = (a->Header.Tag.lower >> 16) & 0xFF; 5409 c->Request.CDB[7] = (a->Header.Tag.lower >> 24) & 0xFF; 5410 c->Request.CDB[8] = a->Header.Tag.upper & 0xFF; 5411 c->Request.CDB[9] = 
(a->Header.Tag.upper >> 8) & 0xFF; 5412 c->Request.CDB[10] = (a->Header.Tag.upper >> 16) & 0xFF; 5413 c->Request.CDB[11] = (a->Header.Tag.upper >> 24) & 0xFF; 5414 c->Request.CDB[12] = 0x00; /* reserved */ 5415 c->Request.CDB[13] = 0x00; /* reserved */ 5416 c->Request.CDB[14] = 0x00; /* reserved */ 5417 c->Request.CDB[15] = 0x00; /* reserved */ 5418 break; 5419 default: 5420 dev_warn(&h->pdev->dev, "unknown message type %d\n", 5421 cmd); 5422 BUG(); 5423 } 5424 } else { 5425 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); 5426 BUG(); 5427 } 5428 5429 switch (c->Request.Type.Direction) { 5430 case XFER_READ: 5431 pci_dir = PCI_DMA_FROMDEVICE; 5432 break; 5433 case XFER_WRITE: 5434 pci_dir = PCI_DMA_TODEVICE; 5435 break; 5436 case XFER_NONE: 5437 pci_dir = PCI_DMA_NONE; 5438 break; 5439 default: 5440 pci_dir = PCI_DMA_BIDIRECTIONAL; 5441 } 5442 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir)) 5443 return -1; 5444 return 0; 5445 } 5446 5447 /* 5448 * Map (physical) PCI mem into (virtual) kernel space 5449 */ 5450 static void __iomem *remap_pci_mem(ulong base, ulong size) 5451 { 5452 ulong page_base = ((ulong) base) & PAGE_MASK; 5453 ulong page_offs = ((ulong) base) - page_base; 5454 void __iomem *page_remapped = ioremap_nocache(page_base, 5455 page_offs + size); 5456 5457 return page_remapped ? (page_remapped + page_offs) : NULL; 5458 } 5459 5460 /* Takes cmds off the submission queue and sends them to the hardware, 5461 * then puts them on the queue of cmds waiting for completion. 5462 */ 5463 static void start_io(struct ctlr_info *h) 5464 { 5465 struct CommandList *c; 5466 unsigned long flags; 5467 5468 spin_lock_irqsave(&h->lock, flags); 5469 while (!list_empty(&h->reqQ)) { 5470 c = list_entry(h->reqQ.next, struct CommandList, list); 5471 /* can't do anything if fifo is full */ 5472 if ((h->access.fifo_full(h))) { 5473 h->fifo_recently_full = 1; 5474 dev_warn(&h->pdev->dev, "fifo full\n"); 5475 break; 5476 } 5477 h->fifo_recently_full = 0; 5478 5479 /* Get the first entry from the Request Q */ 5480 removeQ(c); 5481 h->Qdepth--; 5482 5483 /* Put job onto the completed Q */ 5484 addQ(&h->cmpQ, c); 5485 5486 /* Must increment commands_outstanding before unlocking 5487 * and submitting to avoid race checking for fifo full 5488 * condition. 5489 */ 5490 h->commands_outstanding++; 5491 5492 /* Tell the controller execute command */ 5493 spin_unlock_irqrestore(&h->lock, flags); 5494 h->access.submit_command(h, c); 5495 spin_lock_irqsave(&h->lock, flags); 5496 } 5497 spin_unlock_irqrestore(&h->lock, flags); 5498 } 5499 5500 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q) 5501 { 5502 return h->access.command_completed(h, q); 5503 } 5504 5505 static inline bool interrupt_pending(struct ctlr_info *h) 5506 { 5507 return h->access.intr_pending(h); 5508 } 5509 5510 static inline long interrupt_not_for_us(struct ctlr_info *h) 5511 { 5512 return (h->access.intr_pending(h) == 0) || 5513 (h->interrupts_enabled == 0); 5514 } 5515 5516 static inline int bad_tag(struct ctlr_info *h, u32 tag_index, 5517 u32 raw_tag) 5518 { 5519 if (unlikely(tag_index >= h->nr_cmds)) { 5520 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); 5521 return 1; 5522 } 5523 return 0; 5524 } 5525 5526 static inline void finish_cmd(struct CommandList *c) 5527 { 5528 unsigned long flags; 5529 int io_may_be_stalled = 0; 5530 struct ctlr_info *h = c->h; 5531 5532 spin_lock_irqsave(&h->lock, flags); 5533 removeQ(c); 5534 5535 /* 5536 * Check for possibly stalled i/o. 
5537 * 5538 * If a fifo_full condition is encountered, requests will back up 5539 * in h->reqQ. This queue is only emptied out by start_io which is 5540 * only called when a new i/o request comes in. If no i/o's are 5541 * forthcoming, the i/o's in h->reqQ can get stuck. So we call 5542 * start_io from here if we detect such a danger. 5543 * 5544 * Normally, we shouldn't hit this case, but pounding on the 5545 * CCISS_PASSTHRU ioctl can provoke it. Only call start_io if 5546 * commands_outstanding is low. We want to avoid calling 5547 * start_io from in here as much as possible, and esp. don't 5548 * want to get in a cycle where we call start_io every time 5549 * through here. 5550 */ 5551 if (unlikely(h->fifo_recently_full) && 5552 h->commands_outstanding < 5) 5553 io_may_be_stalled = 1; 5554 5555 spin_unlock_irqrestore(&h->lock, flags); 5556 5557 dial_up_lockup_detection_on_fw_flash_complete(c->h, c); 5558 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI 5559 || c->cmd_type == CMD_IOACCEL2)) 5560 complete_scsi_command(c); 5561 else if (c->cmd_type == CMD_IOCTL_PEND) 5562 complete(c->waiting); 5563 if (unlikely(io_may_be_stalled)) 5564 start_io(h); 5565 } 5566 5567 static inline u32 hpsa_tag_contains_index(u32 tag) 5568 { 5569 return tag & DIRECT_LOOKUP_BIT; 5570 } 5571 5572 static inline u32 hpsa_tag_to_index(u32 tag) 5573 { 5574 return tag >> DIRECT_LOOKUP_SHIFT; 5575 } 5576 5577 5578 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag) 5579 { 5580 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1) 5581 #define HPSA_SIMPLE_ERROR_BITS 0x03 5582 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) 5583 return tag & ~HPSA_SIMPLE_ERROR_BITS; 5584 return tag & ~HPSA_PERF_ERROR_BITS; 5585 } 5586 5587 /* process completion of an indexed ("direct lookup") command */ 5588 static inline void process_indexed_cmd(struct ctlr_info *h, 5589 u32 raw_tag) 5590 { 5591 u32 tag_index; 5592 struct CommandList *c; 5593 5594 tag_index = hpsa_tag_to_index(raw_tag); 5595 if (!bad_tag(h, tag_index, raw_tag)) { 5596 c = h->cmd_pool + tag_index; 5597 finish_cmd(c); 5598 } 5599 } 5600 5601 /* process completion of a non-indexed command */ 5602 static inline void process_nonindexed_cmd(struct ctlr_info *h, 5603 u32 raw_tag) 5604 { 5605 u32 tag; 5606 struct CommandList *c = NULL; 5607 unsigned long flags; 5608 5609 tag = hpsa_tag_discard_error_bits(h, raw_tag); 5610 spin_lock_irqsave(&h->lock, flags); 5611 list_for_each_entry(c, &h->cmpQ, list) { 5612 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) { 5613 spin_unlock_irqrestore(&h->lock, flags); 5614 finish_cmd(c); 5615 return; 5616 } 5617 } 5618 spin_unlock_irqrestore(&h->lock, flags); 5619 bad_tag(h, h->nr_cmds + 1, raw_tag); 5620 } 5621 5622 /* Some controllers, like p400, will give us one interrupt 5623 * after a soft reset, even if we turned interrupts off. 5624 * Only need to check for this in the hpsa_xxx_discard_completions 5625 * functions. 5626 */ 5627 static int ignore_bogus_interrupt(struct ctlr_info *h) 5628 { 5629 if (likely(!reset_devices)) 5630 return 0; 5631 5632 if (likely(h->interrupts_enabled)) 5633 return 0; 5634 5635 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " 5636 "(known firmware bug.) Ignoring.\n"); 5637 5638 return 1; 5639 } 5640 5641 /* 5642 * Convert &h->q[x] (passed to interrupt handlers) back to h. 5643 * Relies on (h-q[x] == x) being true for x such that 5644 * 0 <= x < MAX_REPLY_QUEUES. 
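 * (hpsa_request_irq() stores the value i in h->q[i], so (queue - *queue)
 * points back at &h->q[0] and container_of() then recovers the enclosing
 * struct ctlr_info.)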
5645 */ 5646 static struct ctlr_info *queue_to_hba(u8 *queue) 5647 { 5648 return container_of((queue - *queue), struct ctlr_info, q[0]); 5649 } 5650 5651 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue) 5652 { 5653 struct ctlr_info *h = queue_to_hba(queue); 5654 u8 q = *(u8 *) queue; 5655 u32 raw_tag; 5656 5657 if (ignore_bogus_interrupt(h)) 5658 return IRQ_NONE; 5659 5660 if (interrupt_not_for_us(h)) 5661 return IRQ_NONE; 5662 h->last_intr_timestamp = get_jiffies_64(); 5663 while (interrupt_pending(h)) { 5664 raw_tag = get_next_completion(h, q); 5665 while (raw_tag != FIFO_EMPTY) 5666 raw_tag = next_command(h, q); 5667 } 5668 return IRQ_HANDLED; 5669 } 5670 5671 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue) 5672 { 5673 struct ctlr_info *h = queue_to_hba(queue); 5674 u32 raw_tag; 5675 u8 q = *(u8 *) queue; 5676 5677 if (ignore_bogus_interrupt(h)) 5678 return IRQ_NONE; 5679 5680 h->last_intr_timestamp = get_jiffies_64(); 5681 raw_tag = get_next_completion(h, q); 5682 while (raw_tag != FIFO_EMPTY) 5683 raw_tag = next_command(h, q); 5684 return IRQ_HANDLED; 5685 } 5686 5687 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue) 5688 { 5689 struct ctlr_info *h = queue_to_hba((u8 *) queue); 5690 u32 raw_tag; 5691 u8 q = *(u8 *) queue; 5692 5693 if (interrupt_not_for_us(h)) 5694 return IRQ_NONE; 5695 h->last_intr_timestamp = get_jiffies_64(); 5696 while (interrupt_pending(h)) { 5697 raw_tag = get_next_completion(h, q); 5698 while (raw_tag != FIFO_EMPTY) { 5699 if (likely(hpsa_tag_contains_index(raw_tag))) 5700 process_indexed_cmd(h, raw_tag); 5701 else 5702 process_nonindexed_cmd(h, raw_tag); 5703 raw_tag = next_command(h, q); 5704 } 5705 } 5706 return IRQ_HANDLED; 5707 } 5708 5709 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue) 5710 { 5711 struct ctlr_info *h = queue_to_hba(queue); 5712 u32 raw_tag; 5713 u8 q = *(u8 *) queue; 5714 5715 h->last_intr_timestamp = get_jiffies_64(); 5716 raw_tag = get_next_completion(h, q); 5717 while (raw_tag != FIFO_EMPTY) { 5718 if (likely(hpsa_tag_contains_index(raw_tag))) 5719 process_indexed_cmd(h, raw_tag); 5720 else 5721 process_nonindexed_cmd(h, raw_tag); 5722 raw_tag = next_command(h, q); 5723 } 5724 return IRQ_HANDLED; 5725 } 5726 5727 /* Send a message CDB to the firmware. Careful, this only works 5728 * in simple mode, not performant mode due to the tag lookup. 5729 * We only ever use this immediately after a controller reset. 5730 */ 5731 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, 5732 unsigned char type) 5733 { 5734 struct Command { 5735 struct CommandListHeader CommandHeader; 5736 struct RequestBlock Request; 5737 struct ErrDescriptor ErrorDescriptor; 5738 }; 5739 struct Command *cmd; 5740 static const size_t cmd_sz = sizeof(*cmd) + 5741 sizeof(cmd->ErrorDescriptor); 5742 dma_addr_t paddr64; 5743 uint32_t paddr32, tag; 5744 void __iomem *vaddr; 5745 int i, err; 5746 5747 vaddr = pci_ioremap_bar(pdev, 0); 5748 if (vaddr == NULL) 5749 return -ENOMEM; 5750 5751 /* The Inbound Post Queue only accepts 32-bit physical addresses for the 5752 * CCISS commands, so they must be allocated from the lower 4GiB of 5753 * memory. 5754 */ 5755 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 5756 if (err) { 5757 iounmap(vaddr); 5758 return -ENOMEM; 5759 } 5760 5761 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); 5762 if (cmd == NULL) { 5763 iounmap(vaddr); 5764 return -ENOMEM; 5765 } 5766 5767 /* This must fit, because of the 32-bit consistent DMA mask. 
Also, 5768 * although there's no guarantee, we assume that the address is at 5769 * least 4-byte aligned (most likely, it's page-aligned). 5770 */ 5771 paddr32 = paddr64; 5772 5773 cmd->CommandHeader.ReplyQueue = 0; 5774 cmd->CommandHeader.SGList = 0; 5775 cmd->CommandHeader.SGTotal = 0; 5776 cmd->CommandHeader.Tag.lower = paddr32; 5777 cmd->CommandHeader.Tag.upper = 0; 5778 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); 5779 5780 cmd->Request.CDBLen = 16; 5781 cmd->Request.Type.Type = TYPE_MSG; 5782 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE; 5783 cmd->Request.Type.Direction = XFER_NONE; 5784 cmd->Request.Timeout = 0; /* Don't time out */ 5785 cmd->Request.CDB[0] = opcode; 5786 cmd->Request.CDB[1] = type; 5787 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ 5788 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd); 5789 cmd->ErrorDescriptor.Addr.upper = 0; 5790 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo); 5791 5792 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET); 5793 5794 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { 5795 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); 5796 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32) 5797 break; 5798 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); 5799 } 5800 5801 iounmap(vaddr); 5802 5803 /* we leak the DMA buffer here ... no choice since the controller could 5804 * still complete the command. 5805 */ 5806 if (i == HPSA_MSG_SEND_RETRY_LIMIT) { 5807 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n", 5808 opcode, type); 5809 return -ETIMEDOUT; 5810 } 5811 5812 pci_free_consistent(pdev, cmd_sz, cmd, paddr64); 5813 5814 if (tag & HPSA_ERROR_BIT) { 5815 dev_err(&pdev->dev, "controller message %02x:%02x failed\n", 5816 opcode, type); 5817 return -EIO; 5818 } 5819 5820 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", 5821 opcode, type); 5822 return 0; 5823 } 5824 5825 #define hpsa_noop(p) hpsa_message(p, 3, 0) 5826 5827 static int hpsa_controller_hard_reset(struct pci_dev *pdev, 5828 void * __iomem vaddr, u32 use_doorbell) 5829 { 5830 u16 pmcsr; 5831 int pos; 5832 5833 if (use_doorbell) { 5834 /* For everything after the P600, the PCI power state method 5835 * of resetting the controller doesn't work, so we have this 5836 * other way using the doorbell register. 5837 */ 5838 dev_info(&pdev->dev, "using doorbell to reset controller\n"); 5839 writel(use_doorbell, vaddr + SA5_DOORBELL); 5840 5841 /* PMC hardware guys tell us we need a 10 second delay after 5842 * doorbell reset and before any attempt to talk to the board 5843 * at all to ensure that this actually works and doesn't fall 5844 * over in some weird corner cases. 5845 */ 5846 msleep(10000); 5847 } else { /* Try to do it the PCI power state way */ 5848 5849 /* Quoting from the Open CISS Specification: "The Power 5850 * Management Control/Status Register (CSR) controls the power 5851 * state of the device. The normal operating state is D0, 5852 * CSR=00h. The software off state is D3, CSR=03h. To reset 5853 * the controller, place the interface device in D3 then to D0, 5854 * this causes a secondary PCI reset which will reset the 5855 * controller." 
*/ 5856 5857 pos = pci_find_capability(pdev, PCI_CAP_ID_PM); 5858 if (pos == 0) { 5859 dev_err(&pdev->dev, 5860 "hpsa_reset_controller: " 5861 "PCI PM not supported\n"); 5862 return -ENODEV; 5863 } 5864 dev_info(&pdev->dev, "using PCI PM to reset controller\n"); 5865 /* enter the D3hot power management state */ 5866 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); 5867 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 5868 pmcsr |= PCI_D3hot; 5869 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 5870 5871 msleep(500); 5872 5873 /* enter the D0 power management state */ 5874 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 5875 pmcsr |= PCI_D0; 5876 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 5877 5878 /* 5879 * The P600 requires a small delay when changing states. 5880 * Otherwise we may think the board did not reset and we bail. 5881 * This for kdump only and is particular to the P600. 5882 */ 5883 msleep(500); 5884 } 5885 return 0; 5886 } 5887 5888 static void init_driver_version(char *driver_version, int len) 5889 { 5890 memset(driver_version, 0, len); 5891 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1); 5892 } 5893 5894 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable) 5895 { 5896 char *driver_version; 5897 int i, size = sizeof(cfgtable->driver_version); 5898 5899 driver_version = kmalloc(size, GFP_KERNEL); 5900 if (!driver_version) 5901 return -ENOMEM; 5902 5903 init_driver_version(driver_version, size); 5904 for (i = 0; i < size; i++) 5905 writeb(driver_version[i], &cfgtable->driver_version[i]); 5906 kfree(driver_version); 5907 return 0; 5908 } 5909 5910 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable, 5911 unsigned char *driver_ver) 5912 { 5913 int i; 5914 5915 for (i = 0; i < sizeof(cfgtable->driver_version); i++) 5916 driver_ver[i] = readb(&cfgtable->driver_version[i]); 5917 } 5918 5919 static int controller_reset_failed(struct CfgTable __iomem *cfgtable) 5920 { 5921 5922 char *driver_ver, *old_driver_ver; 5923 int rc, size = sizeof(cfgtable->driver_version); 5924 5925 old_driver_ver = kmalloc(2 * size, GFP_KERNEL); 5926 if (!old_driver_ver) 5927 return -ENOMEM; 5928 driver_ver = old_driver_ver + size; 5929 5930 /* After a reset, the 32 bytes of "driver version" in the cfgtable 5931 * should have been changed, otherwise we know the reset failed. 5932 */ 5933 init_driver_version(old_driver_ver, size); 5934 read_driver_ver_from_cfgtable(cfgtable, driver_ver); 5935 rc = !memcmp(driver_ver, old_driver_ver, size); 5936 kfree(old_driver_ver); 5937 return rc; 5938 } 5939 /* This does a hard reset of the controller using PCI power management 5940 * states or the using the doorbell register. 5941 */ 5942 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) 5943 { 5944 u64 cfg_offset; 5945 u32 cfg_base_addr; 5946 u64 cfg_base_addr_index; 5947 void __iomem *vaddr; 5948 unsigned long paddr; 5949 u32 misc_fw_support; 5950 int rc; 5951 struct CfgTable __iomem *cfgtable; 5952 u32 use_doorbell; 5953 u32 board_id; 5954 u16 command_register; 5955 5956 /* For controllers as old as the P600, this is very nearly 5957 * the same thing as 5958 * 5959 * pci_save_state(pci_dev); 5960 * pci_set_power_state(pci_dev, PCI_D3hot); 5961 * pci_set_power_state(pci_dev, PCI_D0); 5962 * pci_restore_state(pci_dev); 5963 * 5964 * For controllers newer than the P600, the pci power state 5965 * method of resetting doesn't work so we have another way 5966 * using the doorbell register. 
5967 */ 5968 5969 rc = hpsa_lookup_board_id(pdev, &board_id); 5970 if (rc < 0 || !ctlr_is_resettable(board_id)) { 5971 dev_warn(&pdev->dev, "Not resetting device.\n"); 5972 return -ENODEV; 5973 } 5974 5975 /* if controller is soft- but not hard resettable... */ 5976 if (!ctlr_is_hard_resettable(board_id)) 5977 return -ENOTSUPP; /* try soft reset later. */ 5978 5979 /* Save the PCI command register */ 5980 pci_read_config_word(pdev, 4, &command_register); 5981 /* Turn the board off. This is so that later pci_restore_state() 5982 * won't turn the board on before the rest of config space is ready. 5983 */ 5984 pci_disable_device(pdev); 5985 pci_save_state(pdev); 5986 5987 /* find the first memory BAR, so we can find the cfg table */ 5988 rc = hpsa_pci_find_memory_BAR(pdev, &paddr); 5989 if (rc) 5990 return rc; 5991 vaddr = remap_pci_mem(paddr, 0x250); 5992 if (!vaddr) 5993 return -ENOMEM; 5994 5995 /* find cfgtable in order to check if reset via doorbell is supported */ 5996 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, 5997 &cfg_base_addr_index, &cfg_offset); 5998 if (rc) 5999 goto unmap_vaddr; 6000 cfgtable = remap_pci_mem(pci_resource_start(pdev, 6001 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable)); 6002 if (!cfgtable) { 6003 rc = -ENOMEM; 6004 goto unmap_vaddr; 6005 } 6006 rc = write_driver_ver_to_cfgtable(cfgtable); 6007 if (rc) 6008 goto unmap_vaddr; 6009 6010 /* If reset via doorbell register is supported, use that. 6011 * There are two such methods. Favor the newest method. 6012 */ 6013 misc_fw_support = readl(&cfgtable->misc_fw_support); 6014 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2; 6015 if (use_doorbell) { 6016 use_doorbell = DOORBELL_CTLR_RESET2; 6017 } else { 6018 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; 6019 if (use_doorbell) { 6020 dev_warn(&pdev->dev, "Soft reset not supported. " 6021 "Firmware update is required.\n"); 6022 rc = -ENOTSUPP; /* try soft reset */ 6023 goto unmap_cfgtable; 6024 } 6025 } 6026 6027 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); 6028 if (rc) 6029 goto unmap_cfgtable; 6030 6031 pci_restore_state(pdev); 6032 rc = pci_enable_device(pdev); 6033 if (rc) { 6034 dev_warn(&pdev->dev, "failed to enable device.\n"); 6035 goto unmap_cfgtable; 6036 } 6037 pci_write_config_word(pdev, 4, command_register); 6038 6039 /* Some devices (notably the HP Smart Array 5i Controller) 6040 need a little pause here */ 6041 msleep(HPSA_POST_RESET_PAUSE_MSECS); 6042 6043 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); 6044 if (rc) { 6045 dev_warn(&pdev->dev, 6046 "failed waiting for board to become ready " 6047 "after hard reset\n"); 6048 goto unmap_cfgtable; 6049 } 6050 6051 rc = controller_reset_failed(vaddr); 6052 if (rc < 0) 6053 goto unmap_cfgtable; 6054 if (rc) { 6055 dev_warn(&pdev->dev, "Unable to successfully reset " 6056 "controller. Will try soft reset.\n"); 6057 rc = -ENOTSUPP; 6058 } else { 6059 dev_info(&pdev->dev, "board ready after hard reset.\n"); 6060 } 6061 6062 unmap_cfgtable: 6063 iounmap(cfgtable); 6064 6065 unmap_vaddr: 6066 iounmap(vaddr); 6067 return rc; 6068 } 6069 6070 /* 6071 * We cannot read the structure directly, for portability we must use 6072 * the io functions. 6073 * This is for debug only. 
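 * (The config table is ioremap()ed PCI memory, so every field below is
 * fetched with readl()/readb() rather than dereferenced directly.)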
 */
static void print_cfg_table(struct device *dev, struct CfgTable *tb)
{
#ifdef HPSA_DEBUG
	int i;
	char temp_name[17];

	dev_info(dev, "Controller Configuration information\n");
	dev_info(dev, "------------------------------------\n");
	for (i = 0; i < 4; i++)
		temp_name[i] = readb(&(tb->Signature[i]));
	temp_name[4] = '\0';
	dev_info(dev, " Signature = %s\n", temp_name);
	dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
	dev_info(dev, " Transport methods supported = 0x%x\n",
	       readl(&(tb->TransportSupport)));
	dev_info(dev, " Transport methods active = 0x%x\n",
	       readl(&(tb->TransportActive)));
	dev_info(dev, " Requested transport Method = 0x%x\n",
	       readl(&(tb->HostWrite.TransportRequest)));
	dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntDelay)));
	dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntCount)));
	dev_info(dev, " Max outstanding commands = %d\n",
	       readl(&(tb->CmdsOutMax)));
	dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
	for (i = 0; i < 16; i++)
		temp_name[i] = readb(&(tb->ServerName[i]));
	temp_name[16] = '\0';
	dev_info(dev, " Server Name = %s\n", temp_name);
	dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
		readl(&(tb->HeartBeat)));
#endif				/* HPSA_DEBUG */
}

static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
	int i, offset, mem_type, bar_type;

	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
		return 0;
	offset = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
			offset += 4;
		else {
			mem_type = pci_resource_flags(pdev, i) &
			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
			switch (mem_type) {
			case PCI_BASE_ADDRESS_MEM_TYPE_32:
			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
				offset += 4;	/* 32 bit */
				break;
			case PCI_BASE_ADDRESS_MEM_TYPE_64:
				offset += 8;
				break;
			default:	/* reserved in PCI 2.2 */
				dev_warn(&pdev->dev,
				       "base address is invalid\n");
				return -1;
			}
		}
		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
			return i + 1;
	}
	return -1;
}

/* If MSI/MSI-X is supported by the kernel we will try to enable it on
 * controllers that are capable. If not, we use IO-APIC mode.
6147 */ 6148 6149 static void hpsa_interrupt_mode(struct ctlr_info *h) 6150 { 6151 #ifdef CONFIG_PCI_MSI 6152 int err, i; 6153 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES]; 6154 6155 for (i = 0; i < MAX_REPLY_QUEUES; i++) { 6156 hpsa_msix_entries[i].vector = 0; 6157 hpsa_msix_entries[i].entry = i; 6158 } 6159 6160 /* Some boards advertise MSI but don't really support it */ 6161 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || 6162 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) 6163 goto default_int_mode; 6164 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { 6165 dev_info(&h->pdev->dev, "MSIX\n"); 6166 h->msix_vector = MAX_REPLY_QUEUES; 6167 if (h->msix_vector > num_online_cpus()) 6168 h->msix_vector = num_online_cpus(); 6169 err = pci_enable_msix(h->pdev, hpsa_msix_entries, 6170 h->msix_vector); 6171 if (err > 0) { 6172 dev_warn(&h->pdev->dev, "only %d MSI-X vectors " 6173 "available\n", err); 6174 h->msix_vector = err; 6175 err = pci_enable_msix(h->pdev, hpsa_msix_entries, 6176 h->msix_vector); 6177 } 6178 if (!err) { 6179 for (i = 0; i < h->msix_vector; i++) 6180 h->intr[i] = hpsa_msix_entries[i].vector; 6181 return; 6182 } else { 6183 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", 6184 err); 6185 h->msix_vector = 0; 6186 goto default_int_mode; 6187 } 6188 } 6189 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { 6190 dev_info(&h->pdev->dev, "MSI\n"); 6191 if (!pci_enable_msi(h->pdev)) 6192 h->msi_vector = 1; 6193 else 6194 dev_warn(&h->pdev->dev, "MSI init failed\n"); 6195 } 6196 default_int_mode: 6197 #endif /* CONFIG_PCI_MSI */ 6198 /* if we get here we're going to use the default interrupt mode */ 6199 h->intr[h->intr_mode] = h->pdev->irq; 6200 } 6201 6202 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) 6203 { 6204 int i; 6205 u32 subsystem_vendor_id, subsystem_device_id; 6206 6207 subsystem_vendor_id = pdev->subsystem_vendor; 6208 subsystem_device_id = pdev->subsystem_device; 6209 *board_id = ((subsystem_device_id << 16) & 0xffff0000) | 6210 subsystem_vendor_id; 6211 6212 for (i = 0; i < ARRAY_SIZE(products); i++) 6213 if (*board_id == products[i].board_id) 6214 return i; 6215 6216 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP && 6217 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) || 6218 !hpsa_allow_any) { 6219 dev_warn(&pdev->dev, "unrecognized board ID: " 6220 "0x%08x, ignoring.\n", *board_id); 6221 return -ENODEV; 6222 } 6223 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */ 6224 } 6225 6226 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev, 6227 unsigned long *memory_bar) 6228 { 6229 int i; 6230 6231 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) 6232 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 6233 /* addressing mode bits already removed */ 6234 *memory_bar = pci_resource_start(pdev, i); 6235 dev_dbg(&pdev->dev, "memory BAR = %lx\n", 6236 *memory_bar); 6237 return 0; 6238 } 6239 dev_warn(&pdev->dev, "no memory BAR found\n"); 6240 return -ENODEV; 6241 } 6242 6243 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, 6244 int wait_for_ready) 6245 { 6246 int i, iterations; 6247 u32 scratchpad; 6248 if (wait_for_ready) 6249 iterations = HPSA_BOARD_READY_ITERATIONS; 6250 else 6251 iterations = HPSA_BOARD_NOT_READY_ITERATIONS; 6252 6253 for (i = 0; i < iterations; i++) { 6254 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); 6255 if (wait_for_ready) { 6256 if (scratchpad == HPSA_FIRMWARE_READY) 6257 return 0; 6258 } else { 6259 if (scratchpad != 
HPSA_FIRMWARE_READY) 6260 return 0; 6261 } 6262 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS); 6263 } 6264 dev_warn(&pdev->dev, "board not ready, timed out.\n"); 6265 return -ENODEV; 6266 } 6267 6268 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, 6269 u32 *cfg_base_addr, u64 *cfg_base_addr_index, 6270 u64 *cfg_offset) 6271 { 6272 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET); 6273 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET); 6274 *cfg_base_addr &= (u32) 0x0000ffff; 6275 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr); 6276 if (*cfg_base_addr_index == -1) { 6277 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n"); 6278 return -ENODEV; 6279 } 6280 return 0; 6281 } 6282 6283 static int hpsa_find_cfgtables(struct ctlr_info *h) 6284 { 6285 u64 cfg_offset; 6286 u32 cfg_base_addr; 6287 u64 cfg_base_addr_index; 6288 u32 trans_offset; 6289 int rc; 6290 6291 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, 6292 &cfg_base_addr_index, &cfg_offset); 6293 if (rc) 6294 return rc; 6295 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, 6296 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); 6297 if (!h->cfgtable) 6298 return -ENOMEM; 6299 rc = write_driver_ver_to_cfgtable(h->cfgtable); 6300 if (rc) 6301 return rc; 6302 /* Find performant mode table. */ 6303 trans_offset = readl(&h->cfgtable->TransMethodOffset); 6304 h->transtable = remap_pci_mem(pci_resource_start(h->pdev, 6305 cfg_base_addr_index)+cfg_offset+trans_offset, 6306 sizeof(*h->transtable)); 6307 if (!h->transtable) 6308 return -ENOMEM; 6309 return 0; 6310 } 6311 6312 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) 6313 { 6314 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); 6315 6316 /* Limit commands in memory limited kdump scenario. */ 6317 if (reset_devices && h->max_commands > 32) 6318 h->max_commands = 32; 6319 6320 if (h->max_commands < 16) { 6321 dev_warn(&h->pdev->dev, "Controller reports " 6322 "max supported commands of %d, an obvious lie. " 6323 "Using 16. Ensure that firmware is up to date.\n", 6324 h->max_commands); 6325 h->max_commands = 16; 6326 } 6327 } 6328 6329 /* Interrogate the hardware for some limits: 6330 * max commands, max SG elements without chaining, and with chaining, 6331 * SG chain block size, etc. 6332 */ 6333 static void hpsa_find_board_params(struct ctlr_info *h) 6334 { 6335 hpsa_get_max_perf_mode_cmds(h); 6336 h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */ 6337 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); 6338 h->fw_support = readl(&(h->cfgtable->misc_fw_support)); 6339 /* 6340 * Limit in-command s/g elements to 32 save dma'able memory. 
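	 * (For example, a controller reporting maxsgentries = 1024 ends up
	 * with 32 in-command entries, the last one used as the chain pointer,
	 * and a chain block of 1024 - 32 + 1 = 993 entries.)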
	 * However, the spec says if 0, use 31
	 */
	h->max_cmd_sg_entries = 31;
	if (h->maxsgentries > 512) {
		h->max_cmd_sg_entries = 32;
		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
		h->maxsgentries--; /* save one for chain pointer */
	} else {
		h->maxsgentries = 31; /* default to traditional values */
		h->chainsize = 0;
	}

	/* Find out what task management functions are supported and cache */
	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
	if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
}

static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
	if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
		return false;
	}
	return true;
}

static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
{
	u32 driver_support;

	/* Read the current value first so we never write back
	 * an uninitialized driver_support on non-x86 builds.
	 */
	driver_support = readl(&(h->cfgtable->driver_support));
#ifdef CONFIG_X86
	/* Need to enable prefetch in the SCSI core for 6400 in x86 */
	driver_support |= ENABLE_SCSI_PREFETCH;
#endif
	driver_support |= ENABLE_UNIT_ATTN;
	writel(driver_support, &(h->cfgtable->driver_support));
}

/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
	u32 dma_prefetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}

static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;
	/* wait until the clear_event_notify bit 6 is cleared by controller. */
	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
			break;
		/* delay and try again */
		msleep(20);
	}
}

static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* under certain very rare conditions, this can take a while.
	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
	 * as we enter this code.)
6423 */ 6424 for (i = 0; i < MAX_CONFIG_WAIT; i++) { 6425 spin_lock_irqsave(&h->lock, flags); 6426 doorbell_value = readl(h->vaddr + SA5_DOORBELL); 6427 spin_unlock_irqrestore(&h->lock, flags); 6428 if (!(doorbell_value & CFGTBL_ChangeReq)) 6429 break; 6430 /* delay and try again */ 6431 usleep_range(10000, 20000); 6432 } 6433 } 6434 6435 static int hpsa_enter_simple_mode(struct ctlr_info *h) 6436 { 6437 u32 trans_support; 6438 6439 trans_support = readl(&(h->cfgtable->TransportSupport)); 6440 if (!(trans_support & SIMPLE_MODE)) 6441 return -ENOTSUPP; 6442 6443 h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); 6444 6445 /* Update the field, and then ring the doorbell */ 6446 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); 6447 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); 6448 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 6449 hpsa_wait_for_mode_change_ack(h); 6450 print_cfg_table(&h->pdev->dev, h->cfgtable); 6451 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) 6452 goto error; 6453 h->transMethod = CFGTBL_Trans_Simple; 6454 return 0; 6455 error: 6456 dev_warn(&h->pdev->dev, "unable to get board into simple mode\n"); 6457 return -ENODEV; 6458 } 6459 6460 static int hpsa_pci_init(struct ctlr_info *h) 6461 { 6462 int prod_index, err; 6463 6464 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); 6465 if (prod_index < 0) 6466 return -ENODEV; 6467 h->product_name = products[prod_index].product_name; 6468 h->access = *(products[prod_index].access); 6469 6470 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | 6471 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); 6472 6473 err = pci_enable_device(h->pdev); 6474 if (err) { 6475 dev_warn(&h->pdev->dev, "unable to enable PCI device\n"); 6476 return err; 6477 } 6478 6479 /* Enable bus mastering (pci_disable_device may disable this) */ 6480 pci_set_master(h->pdev); 6481 6482 err = pci_request_regions(h->pdev, HPSA); 6483 if (err) { 6484 dev_err(&h->pdev->dev, 6485 "cannot obtain PCI resources, aborting\n"); 6486 return err; 6487 } 6488 hpsa_interrupt_mode(h); 6489 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); 6490 if (err) 6491 goto err_out_free_res; 6492 h->vaddr = remap_pci_mem(h->paddr, 0x250); 6493 if (!h->vaddr) { 6494 err = -ENOMEM; 6495 goto err_out_free_res; 6496 } 6497 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); 6498 if (err) 6499 goto err_out_free_res; 6500 err = hpsa_find_cfgtables(h); 6501 if (err) 6502 goto err_out_free_res; 6503 hpsa_find_board_params(h); 6504 6505 if (!hpsa_CISS_signature_present(h)) { 6506 err = -ENODEV; 6507 goto err_out_free_res; 6508 } 6509 hpsa_set_driver_support_bits(h); 6510 hpsa_p600_dma_prefetch_quirk(h); 6511 err = hpsa_enter_simple_mode(h); 6512 if (err) 6513 goto err_out_free_res; 6514 return 0; 6515 6516 err_out_free_res: 6517 if (h->transtable) 6518 iounmap(h->transtable); 6519 if (h->cfgtable) 6520 iounmap(h->cfgtable); 6521 if (h->vaddr) 6522 iounmap(h->vaddr); 6523 pci_disable_device(h->pdev); 6524 pci_release_regions(h->pdev); 6525 return err; 6526 } 6527 6528 static void hpsa_hba_inquiry(struct ctlr_info *h) 6529 { 6530 int rc; 6531 6532 #define HBA_INQUIRY_BYTE_COUNT 64 6533 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL); 6534 if (!h->hba_inquiry_data) 6535 return; 6536 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0, 6537 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT); 6538 if (rc != 0) { 6539 kfree(h->hba_inquiry_data); 6540 h->hba_inquiry_data = NULL; 6541 } 6542 } 6543 6544 static int 
hpsa_init_reset_devices(struct pci_dev *pdev) 6545 { 6546 int rc, i; 6547 6548 if (!reset_devices) 6549 return 0; 6550 6551 /* Reset the controller with a PCI power-cycle or via doorbell */ 6552 rc = hpsa_kdump_hard_reset_controller(pdev); 6553 6554 /* -ENOTSUPP here means we cannot reset the controller 6555 * but it's already (and still) up and running in 6556 * "performant mode". Or, it might be 640x, which can't reset 6557 * due to concerns about shared bbwc between 6402/6404 pair. 6558 */ 6559 if (rc == -ENOTSUPP) 6560 return rc; /* just try to do the kdump anyhow. */ 6561 if (rc) 6562 return -ENODEV; 6563 6564 /* Now try to get the controller to respond to a no-op */ 6565 dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n"); 6566 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { 6567 if (hpsa_noop(pdev) == 0) 6568 break; 6569 else 6570 dev_warn(&pdev->dev, "no-op failed%s\n", 6571 (i < 11 ? "; re-trying" : "")); 6572 } 6573 return 0; 6574 } 6575 6576 static int hpsa_allocate_cmd_pool(struct ctlr_info *h) 6577 { 6578 h->cmd_pool_bits = kzalloc( 6579 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) * 6580 sizeof(unsigned long), GFP_KERNEL); 6581 h->cmd_pool = pci_alloc_consistent(h->pdev, 6582 h->nr_cmds * sizeof(*h->cmd_pool), 6583 &(h->cmd_pool_dhandle)); 6584 h->errinfo_pool = pci_alloc_consistent(h->pdev, 6585 h->nr_cmds * sizeof(*h->errinfo_pool), 6586 &(h->errinfo_pool_dhandle)); 6587 if ((h->cmd_pool_bits == NULL) 6588 || (h->cmd_pool == NULL) 6589 || (h->errinfo_pool == NULL)) { 6590 dev_err(&h->pdev->dev, "out of memory in %s", __func__); 6591 return -ENOMEM; 6592 } 6593 return 0; 6594 } 6595 6596 static void hpsa_free_cmd_pool(struct ctlr_info *h) 6597 { 6598 kfree(h->cmd_pool_bits); 6599 if (h->cmd_pool) 6600 pci_free_consistent(h->pdev, 6601 h->nr_cmds * sizeof(struct CommandList), 6602 h->cmd_pool, h->cmd_pool_dhandle); 6603 if (h->ioaccel2_cmd_pool) 6604 pci_free_consistent(h->pdev, 6605 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), 6606 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle); 6607 if (h->errinfo_pool) 6608 pci_free_consistent(h->pdev, 6609 h->nr_cmds * sizeof(struct ErrorInfo), 6610 h->errinfo_pool, 6611 h->errinfo_pool_dhandle); 6612 if (h->ioaccel_cmd_pool) 6613 pci_free_consistent(h->pdev, 6614 h->nr_cmds * sizeof(struct io_accel1_cmd), 6615 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); 6616 } 6617 6618 static int hpsa_request_irq(struct ctlr_info *h, 6619 irqreturn_t (*msixhandler)(int, void *), 6620 irqreturn_t (*intxhandler)(int, void *)) 6621 { 6622 int rc, i; 6623 6624 /* 6625 * initialize h->q[x] = x so that interrupt handlers know which 6626 * queue to process. 
6627 */ 6628 for (i = 0; i < MAX_REPLY_QUEUES; i++) 6629 h->q[i] = (u8) i; 6630 6631 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) { 6632 /* If performant mode and MSI-X, use multiple reply queues */ 6633 for (i = 0; i < h->msix_vector; i++) 6634 rc = request_irq(h->intr[i], msixhandler, 6635 0, h->devname, 6636 &h->q[i]); 6637 } else { 6638 /* Use single reply pool */ 6639 if (h->msix_vector > 0 || h->msi_vector) { 6640 rc = request_irq(h->intr[h->intr_mode], 6641 msixhandler, 0, h->devname, 6642 &h->q[h->intr_mode]); 6643 } else { 6644 rc = request_irq(h->intr[h->intr_mode], 6645 intxhandler, IRQF_SHARED, h->devname, 6646 &h->q[h->intr_mode]); 6647 } 6648 } 6649 if (rc) { 6650 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n", 6651 h->intr[h->intr_mode], h->devname); 6652 return -ENODEV; 6653 } 6654 return 0; 6655 } 6656 6657 static int hpsa_kdump_soft_reset(struct ctlr_info *h) 6658 { 6659 if (hpsa_send_host_reset(h, RAID_CTLR_LUNID, 6660 HPSA_RESET_TYPE_CONTROLLER)) { 6661 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n"); 6662 return -EIO; 6663 } 6664 6665 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); 6666 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) { 6667 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); 6668 return -1; 6669 } 6670 6671 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); 6672 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) { 6673 dev_warn(&h->pdev->dev, "Board failed to become ready " 6674 "after soft reset.\n"); 6675 return -1; 6676 } 6677 6678 return 0; 6679 } 6680 6681 static void free_irqs(struct ctlr_info *h) 6682 { 6683 int i; 6684 6685 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) { 6686 /* Single reply queue, only one irq to free */ 6687 i = h->intr_mode; 6688 free_irq(h->intr[i], &h->q[i]); 6689 return; 6690 } 6691 6692 for (i = 0; i < h->msix_vector; i++) 6693 free_irq(h->intr[i], &h->q[i]); 6694 } 6695 6696 static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h) 6697 { 6698 free_irqs(h); 6699 #ifdef CONFIG_PCI_MSI 6700 if (h->msix_vector) { 6701 if (h->pdev->msix_enabled) 6702 pci_disable_msix(h->pdev); 6703 } else if (h->msi_vector) { 6704 if (h->pdev->msi_enabled) 6705 pci_disable_msi(h->pdev); 6706 } 6707 #endif /* CONFIG_PCI_MSI */ 6708 } 6709 6710 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) 6711 { 6712 hpsa_free_irqs_and_disable_msix(h); 6713 hpsa_free_sg_chain_blocks(h); 6714 hpsa_free_cmd_pool(h); 6715 kfree(h->ioaccel1_blockFetchTable); 6716 kfree(h->blockFetchTable); 6717 pci_free_consistent(h->pdev, h->reply_pool_size, 6718 h->reply_pool, h->reply_pool_dhandle); 6719 if (h->vaddr) 6720 iounmap(h->vaddr); 6721 if (h->transtable) 6722 iounmap(h->transtable); 6723 if (h->cfgtable) 6724 iounmap(h->cfgtable); 6725 pci_release_regions(h->pdev); 6726 kfree(h); 6727 } 6728 6729 /* Called when controller lockup detected. */ 6730 static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list) 6731 { 6732 struct CommandList *c = NULL; 6733 6734 assert_spin_locked(&h->lock); 6735 /* Mark all outstanding commands as failed and complete them. 
*/ 6736 while (!list_empty(list)) { 6737 c = list_entry(list->next, struct CommandList, list); 6738 c->err_info->CommandStatus = CMD_HARDWARE_ERR; 6739 finish_cmd(c); 6740 } 6741 } 6742 6743 static void controller_lockup_detected(struct ctlr_info *h) 6744 { 6745 unsigned long flags; 6746 6747 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6748 spin_lock_irqsave(&h->lock, flags); 6749 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); 6750 spin_unlock_irqrestore(&h->lock, flags); 6751 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n", 6752 h->lockup_detected); 6753 pci_disable_device(h->pdev); 6754 spin_lock_irqsave(&h->lock, flags); 6755 fail_all_cmds_on_list(h, &h->cmpQ); 6756 fail_all_cmds_on_list(h, &h->reqQ); 6757 spin_unlock_irqrestore(&h->lock, flags); 6758 } 6759 6760 static void detect_controller_lockup(struct ctlr_info *h) 6761 { 6762 u64 now; 6763 u32 heartbeat; 6764 unsigned long flags; 6765 6766 now = get_jiffies_64(); 6767 /* If we've received an interrupt recently, we're ok. */ 6768 if (time_after64(h->last_intr_timestamp + 6769 (h->heartbeat_sample_interval), now)) 6770 return; 6771 6772 /* 6773 * If we've already checked the heartbeat recently, we're ok. 6774 * This could happen if someone sends us a signal. We 6775 * otherwise don't care about signals in this thread. 6776 */ 6777 if (time_after64(h->last_heartbeat_timestamp + 6778 (h->heartbeat_sample_interval), now)) 6779 return; 6780 6781 /* If heartbeat has not changed since we last looked, we're not ok. */ 6782 spin_lock_irqsave(&h->lock, flags); 6783 heartbeat = readl(&h->cfgtable->HeartBeat); 6784 spin_unlock_irqrestore(&h->lock, flags); 6785 if (h->last_heartbeat == heartbeat) { 6786 controller_lockup_detected(h); 6787 return; 6788 } 6789 6790 /* We're ok. */ 6791 h->last_heartbeat = heartbeat; 6792 h->last_heartbeat_timestamp = now; 6793 } 6794 6795 static void hpsa_ack_ctlr_events(struct ctlr_info *h) 6796 { 6797 int i; 6798 char *event_type; 6799 6800 /* Clear the driver-requested rescan flag */ 6801 h->drv_req_rescan = 0; 6802 6803 /* Ask the controller to clear the events we're handling. */ 6804 if ((h->transMethod & (CFGTBL_Trans_io_accel1 6805 | CFGTBL_Trans_io_accel2)) && 6806 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE || 6807 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) { 6808 6809 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE) 6810 event_type = "state change"; 6811 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE) 6812 event_type = "configuration change"; 6813 /* Stop sending new RAID offload reqs via the IO accelerator */ 6814 scsi_block_requests(h->scsi_host); 6815 for (i = 0; i < h->ndevices; i++) 6816 h->dev[i]->offload_enabled = 0; 6817 hpsa_drain_accel_commands(h); 6818 /* Set 'accelerator path config change' bit */ 6819 dev_warn(&h->pdev->dev, 6820 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n", 6821 h->events, event_type); 6822 writel(h->events, &(h->cfgtable->clear_event_notify)); 6823 /* Set the "clear event notify field update" bit 6 */ 6824 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); 6825 /* Wait until ctlr clears 'clear event notify field', bit 6 */ 6826 hpsa_wait_for_clear_event_notify_ack(h); 6827 scsi_unblock_requests(h->scsi_host); 6828 } else { 6829 /* Acknowledge controller notification events. 
*/ 6830 writel(h->events, &(h->cfgtable->clear_event_notify)); 6831 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); 6832 hpsa_wait_for_clear_event_notify_ack(h); 6833 #if 0 6834 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 6835 hpsa_wait_for_mode_change_ack(h); 6836 #endif 6837 } 6838 return; 6839 } 6840 6841 /* Check a register on the controller to see if there are configuration 6842 * changes (added/changed/removed logical drives, etc.) which mean that 6843 * we should rescan the controller for devices. 6844 * Also check flag for driver-initiated rescan. 6845 */ 6846 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h) 6847 { 6848 if (h->drv_req_rescan) 6849 return 1; 6850 6851 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) 6852 return 0; 6853 6854 h->events = readl(&(h->cfgtable->event_notify)); 6855 return h->events & RESCAN_REQUIRED_EVENT_BITS; 6856 } 6857 6858 /* 6859 * Check if any of the offline devices have become ready 6860 */ 6861 static int hpsa_offline_devices_ready(struct ctlr_info *h) 6862 { 6863 unsigned long flags; 6864 struct offline_device_entry *d; 6865 struct list_head *this, *tmp; 6866 6867 spin_lock_irqsave(&h->offline_device_lock, flags); 6868 list_for_each_safe(this, tmp, &h->offline_device_list) { 6869 d = list_entry(this, struct offline_device_entry, 6870 offline_list); 6871 spin_unlock_irqrestore(&h->offline_device_lock, flags); 6872 if (!hpsa_volume_offline(h, d->scsi3addr)) 6873 return 1; 6874 spin_lock_irqsave(&h->offline_device_lock, flags); 6875 } 6876 spin_unlock_irqrestore(&h->offline_device_lock, flags); 6877 return 0; 6878 } 6879 6880 6881 static void hpsa_monitor_ctlr_worker(struct work_struct *work) 6882 { 6883 unsigned long flags; 6884 struct ctlr_info *h = container_of(to_delayed_work(work), 6885 struct ctlr_info, monitor_ctlr_work); 6886 detect_controller_lockup(h); 6887 if (h->lockup_detected) 6888 return; 6889 6890 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { 6891 scsi_host_get(h->scsi_host); 6892 h->drv_req_rescan = 0; 6893 hpsa_ack_ctlr_events(h); 6894 hpsa_scan_start(h->scsi_host); 6895 scsi_host_put(h->scsi_host); 6896 } 6897 6898 spin_lock_irqsave(&h->lock, flags); 6899 if (h->remove_in_progress) { 6900 spin_unlock_irqrestore(&h->lock, flags); 6901 return; 6902 } 6903 schedule_delayed_work(&h->monitor_ctlr_work, 6904 h->heartbeat_sample_interval); 6905 spin_unlock_irqrestore(&h->lock, flags); 6906 } 6907 6908 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 6909 { 6910 int dac, rc; 6911 struct ctlr_info *h; 6912 int try_soft_reset = 0; 6913 unsigned long flags; 6914 6915 if (number_of_controllers == 0) 6916 printk(KERN_INFO DRIVER_NAME "\n"); 6917 6918 rc = hpsa_init_reset_devices(pdev); 6919 if (rc) { 6920 if (rc != -ENOTSUPP) 6921 return rc; 6922 /* If the reset fails in a particular way (it has no way to do 6923 * a proper hard reset, so returns -ENOTSUPP) we can try to do 6924 * a soft reset once we get the controller configured up to the 6925 * point that it can accept a command. 6926 */ 6927 try_soft_reset = 1; 6928 rc = 0; 6929 } 6930 6931 reinit_after_soft_reset: 6932 6933 /* Command structures must be aligned on a 32-byte boundary because 6934 * the 5 lower bits of the address are used by the hardware. and by 6935 * the driver. See comments in hpsa.h for more info. 
6936 */ 6937 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); 6938 h = kzalloc(sizeof(*h), GFP_KERNEL); 6939 if (!h) 6940 return -ENOMEM; 6941 6942 h->pdev = pdev; 6943 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT; 6944 INIT_LIST_HEAD(&h->cmpQ); 6945 INIT_LIST_HEAD(&h->reqQ); 6946 INIT_LIST_HEAD(&h->offline_device_list); 6947 spin_lock_init(&h->lock); 6948 spin_lock_init(&h->offline_device_lock); 6949 spin_lock_init(&h->scan_lock); 6950 spin_lock_init(&h->passthru_count_lock); 6951 rc = hpsa_pci_init(h); 6952 if (rc != 0) 6953 goto clean1; 6954 6955 sprintf(h->devname, HPSA "%d", number_of_controllers); 6956 h->ctlr = number_of_controllers; 6957 number_of_controllers++; 6958 6959 /* configure PCI DMA stuff */ 6960 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 6961 if (rc == 0) { 6962 dac = 1; 6963 } else { 6964 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 6965 if (rc == 0) { 6966 dac = 0; 6967 } else { 6968 dev_err(&pdev->dev, "no suitable DMA available\n"); 6969 goto clean1; 6970 } 6971 } 6972 6973 /* make sure the board interrupts are off */ 6974 h->access.set_intr_mask(h, HPSA_INTR_OFF); 6975 6976 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx)) 6977 goto clean2; 6978 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", 6979 h->devname, pdev->device, 6980 h->intr[h->intr_mode], dac ? "" : " not"); 6981 if (hpsa_allocate_cmd_pool(h)) 6982 goto clean4; 6983 if (hpsa_allocate_sg_chain_blocks(h)) 6984 goto clean4; 6985 init_waitqueue_head(&h->scan_wait_queue); 6986 h->scan_finished = 1; /* no scan currently in progress */ 6987 6988 pci_set_drvdata(pdev, h); 6989 h->ndevices = 0; 6990 h->hba_mode_enabled = 0; 6991 h->scsi_host = NULL; 6992 spin_lock_init(&h->devlock); 6993 hpsa_put_ctlr_into_performant_mode(h); 6994 6995 /* At this point, the controller is ready to take commands. 6996 * Now, if reset_devices and the hard reset didn't work, try 6997 * the soft reset and see if that works. 6998 */ 6999 if (try_soft_reset) { 7000 7001 /* This is kind of gross. We may or may not get a completion 7002 * from the soft reset command, and if we do, then the value 7003 * from the fifo may or may not be valid. So, we wait 10 secs 7004 * after the reset throwing away any completions we get during 7005 * that time. Unregister the interrupt handler and register 7006 * fake ones to scoop up any residual completions. 7007 */ 7008 spin_lock_irqsave(&h->lock, flags); 7009 h->access.set_intr_mask(h, HPSA_INTR_OFF); 7010 spin_unlock_irqrestore(&h->lock, flags); 7011 free_irqs(h); 7012 rc = hpsa_request_irq(h, hpsa_msix_discard_completions, 7013 hpsa_intx_discard_completions); 7014 if (rc) { 7015 dev_warn(&h->pdev->dev, "Failed to request_irq after " 7016 "soft reset.\n"); 7017 goto clean4; 7018 } 7019 7020 rc = hpsa_kdump_soft_reset(h); 7021 if (rc) 7022 /* Neither hard nor soft reset worked, we're hosed. */ 7023 goto clean4; 7024 7025 dev_info(&h->pdev->dev, "Board READY.\n"); 7026 dev_info(&h->pdev->dev, 7027 "Waiting for stale completions to drain.\n"); 7028 h->access.set_intr_mask(h, HPSA_INTR_ON); 7029 msleep(10000); 7030 h->access.set_intr_mask(h, HPSA_INTR_OFF); 7031 7032 rc = controller_reset_failed(h->cfgtable); 7033 if (rc) 7034 dev_info(&h->pdev->dev, 7035 "Soft reset appears to have failed.\n"); 7036 7037 /* since the controller's reset, we have to go back and re-init 7038 * everything. Easiest to just forget what we've done and do it 7039 * all over again. 
7040 */ 7041 hpsa_undo_allocations_after_kdump_soft_reset(h); 7042 try_soft_reset = 0; 7043 if (rc) 7044 /* don't go to clean4, we already unallocated */ 7045 return -ENODEV; 7046 7047 goto reinit_after_soft_reset; 7048 } 7049 7050 /* Enable Accelerated IO path at driver layer */ 7051 h->acciopath_status = 1; 7052 7053 h->drv_req_rescan = 0; 7054 7055 /* Turn the interrupts on so we can service requests */ 7056 h->access.set_intr_mask(h, HPSA_INTR_ON); 7057 7058 hpsa_hba_inquiry(h); 7059 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ 7060 7061 /* Monitor the controller for firmware lockups */ 7062 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; 7063 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); 7064 schedule_delayed_work(&h->monitor_ctlr_work, 7065 h->heartbeat_sample_interval); 7066 return 0; 7067 7068 clean4: 7069 hpsa_free_sg_chain_blocks(h); 7070 hpsa_free_cmd_pool(h); 7071 free_irqs(h); 7072 clean2: 7073 clean1: 7074 kfree(h); 7075 return rc; 7076 } 7077 7078 static void hpsa_flush_cache(struct ctlr_info *h) 7079 { 7080 char *flush_buf; 7081 struct CommandList *c; 7082 unsigned long flags; 7083 7084 /* Don't bother trying to flush the cache if locked up */ 7085 spin_lock_irqsave(&h->lock, flags); 7086 if (unlikely(h->lockup_detected)) { 7087 spin_unlock_irqrestore(&h->lock, flags); 7088 return; 7089 } 7090 spin_unlock_irqrestore(&h->lock, flags); 7091 7092 flush_buf = kzalloc(4, GFP_KERNEL); 7093 if (!flush_buf) 7094 return; 7095 7096 c = cmd_special_alloc(h); 7097 if (!c) { 7098 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 7099 goto out_of_memory; 7100 } 7101 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, 7102 RAID_CTLR_LUNID, TYPE_CMD)) { 7103 goto out; 7104 } 7105 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE); 7106 if (c->err_info->CommandStatus != 0) 7107 out: 7108 dev_warn(&h->pdev->dev, 7109 "error flushing cache on controller\n"); 7110 cmd_special_free(h, c); 7111 out_of_memory: 7112 kfree(flush_buf); 7113 } 7114 7115 static void hpsa_shutdown(struct pci_dev *pdev) 7116 { 7117 struct ctlr_info *h; 7118 7119 h = pci_get_drvdata(pdev); 7120 /* Turn board interrupts off and send the flush cache command 7121 * sendcmd will turn off interrupt, and send the flush... 
7122 * To write all data in the battery backed cache to disks 7123 */ 7124 hpsa_flush_cache(h); 7125 h->access.set_intr_mask(h, HPSA_INTR_OFF); 7126 hpsa_free_irqs_and_disable_msix(h); 7127 } 7128 7129 static void hpsa_free_device_info(struct ctlr_info *h) 7130 { 7131 int i; 7132 7133 for (i = 0; i < h->ndevices; i++) 7134 kfree(h->dev[i]); 7135 } 7136 7137 static void hpsa_remove_one(struct pci_dev *pdev) 7138 { 7139 struct ctlr_info *h; 7140 unsigned long flags; 7141 7142 if (pci_get_drvdata(pdev) == NULL) { 7143 dev_err(&pdev->dev, "unable to remove device\n"); 7144 return; 7145 } 7146 h = pci_get_drvdata(pdev); 7147 7148 /* Get rid of any controller monitoring work items */ 7149 spin_lock_irqsave(&h->lock, flags); 7150 h->remove_in_progress = 1; 7151 cancel_delayed_work(&h->monitor_ctlr_work); 7152 spin_unlock_irqrestore(&h->lock, flags); 7153 7154 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ 7155 hpsa_shutdown(pdev); 7156 iounmap(h->vaddr); 7157 iounmap(h->transtable); 7158 iounmap(h->cfgtable); 7159 hpsa_free_device_info(h); 7160 hpsa_free_sg_chain_blocks(h); 7161 pci_free_consistent(h->pdev, 7162 h->nr_cmds * sizeof(struct CommandList), 7163 h->cmd_pool, h->cmd_pool_dhandle); 7164 pci_free_consistent(h->pdev, 7165 h->nr_cmds * sizeof(struct ErrorInfo), 7166 h->errinfo_pool, h->errinfo_pool_dhandle); 7167 pci_free_consistent(h->pdev, h->reply_pool_size, 7168 h->reply_pool, h->reply_pool_dhandle); 7169 kfree(h->cmd_pool_bits); 7170 kfree(h->blockFetchTable); 7171 kfree(h->ioaccel1_blockFetchTable); 7172 kfree(h->ioaccel2_blockFetchTable); 7173 kfree(h->hba_inquiry_data); 7174 pci_disable_device(pdev); 7175 pci_release_regions(pdev); 7176 kfree(h); 7177 } 7178 7179 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev, 7180 __attribute__((unused)) pm_message_t state) 7181 { 7182 return -ENOSYS; 7183 } 7184 7185 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev) 7186 { 7187 return -ENOSYS; 7188 } 7189 7190 static struct pci_driver hpsa_pci_driver = { 7191 .name = HPSA, 7192 .probe = hpsa_init_one, 7193 .remove = hpsa_remove_one, 7194 .id_table = hpsa_pci_device_id, /* id_table */ 7195 .shutdown = hpsa_shutdown, 7196 .suspend = hpsa_suspend, 7197 .resume = hpsa_resume, 7198 }; 7199 7200 /* Fill in bucket_map[], given nsgs (the max number of 7201 * scatter gather elements supported) and bucket[], 7202 * which is an array of 8 integers. The bucket[] array 7203 * contains 8 different DMA transfer sizes (in 16 7204 * byte increments) which the controller uses to fetch 7205 * commands. This function fills in bucket_map[], which 7206 * maps a given number of scatter gather elements to one of 7207 * the 8 DMA transfer sizes. The point of it is to allow the 7208 * controller to only do as much DMA as needed to fetch the 7209 * command, with the DMA transfer size encoded in the lower 7210 * bits of the command address. 7211 */ 7212 static void calc_bucket_map(int bucket[], int num_buckets, 7213 int nsgs, int min_blocks, int *bucket_map) 7214 { 7215 int i, j, b, size; 7216 7217 /* Note, bucket_map must have nsgs+1 entries. */ 7218 for (i = 0; i <= nsgs; i++) { 7219 /* Compute size of a command with i SG entries */ 7220 size = i + min_blocks; 7221 b = num_buckets; /* Assume the biggest bucket */ 7222 /* Find the bucket that is just big enough */ 7223 for (j = 0; j < num_buckets; j++) { 7224 if (bucket[j] >= size) { 7225 b = j; 7226 break; 7227 } 7228 } 7229 /* for a command with i SG entries, use bucket b. 
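		 * For example, with the bft[] table that
		 * hpsa_enter_performant_mode() passes in below (min_blocks = 4),
		 * i = 3 SG entries gives size = 3 + 4 = 7 sixteen-byte blocks;
		 * the smallest bucket >= 7 is bft[2] = 8, so bucket_map[3] = 2.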

static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
	int i;
	unsigned long register_value;
	unsigned long transMethod = CFGTBL_Trans_Performant |
			(trans_support & CFGTBL_Trans_use_short_tags) |
				CFGTBL_Trans_enable_directed_msix |
			(trans_support & (CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_io_accel2));
	struct access_method access = SA5_performant_access;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to, to tell it 8 different
	 * sizes of commands which there may be.  It's a way of
	 * reducing the DMA done to fetch each command.  Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within.  The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16-byte blocks.
	 * The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
	 * blocks.  Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements.  bft[] contains the eight values we write to
	 * the registers.  They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
#define MIN_IOACCEL2_BFT_ENTRY 5
#define HPSA_IOACCEL2_HEADER_SZ 4
	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
			13, 14, 15, 16, 17, 18, 19,
			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
			16 * MIN_IOACCEL2_BFT_ENTRY);
	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/* 5 = 1 s/g entry or 4k
	 * 6 = 2 s/g entry or 8k
	 * 8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */

	/* Controller spec: zero out this buffer.
	 */
	memset(h->reply_pool, 0, h->reply_pool_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
			SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_pool_dhandle +
			(h->max_commands * sizeof(u64) * i),
			&h->transtable->RepQAddr[i].lower);
	}

	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
	/*
	 * enable outbound interrupt coalescing in accelerator mode
	 */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		access = SA5_ioaccel_mode1_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	} else {
		if (trans_support & CFGTBL_Trans_io_accel2) {
			access = SA5_ioaccel_mode2_access;
			writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
			writel(4, &h->cfgtable->HostWrite.CoalIntCount);
		}
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_warn(&h->pdev->dev, "unable to get board into"
					" performant mode\n");
		return;
	}
	/* Change the access methods to the performant access methods */
	h->access = access;
	h->transMethod = transMethod;

	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
		(trans_support & CFGTBL_Trans_io_accel2)))
		return;

	if (trans_support & CFGTBL_Trans_io_accel1) {
		/* Set up I/O accelerator mode */
		for (i = 0; i < h->nreply_queues; i++) {
			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
			h->reply_queue[i].current_entry =
				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
		}
		bft[7] = h->ioaccel_maxsg + 8;
		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
				h->ioaccel1_blockFetchTable);

		/* initialize all reply queue entries to unused */
		memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED,
				h->reply_pool_size);

		/* set all the constant fields in the accelerator command
		 * frames once at init time to save CPU cycles later.
		 */
		for (i = 0; i < h->nr_cmds; i++) {
			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];

			cp->function = IOACCEL1_FUNCTION_SCSIIO;
			cp->err_info = (u32) (h->errinfo_pool_dhandle +
					(i * sizeof(struct ErrorInfo)));
			cp->err_info_len = sizeof(struct ErrorInfo);
			cp->sgl_offset = IOACCEL1_SGLOFFSET;
			cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT;
			cp->timeout_sec = 0;
			cp->ReplyQueue = 0;
			cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) |
						DIRECT_LOOKUP_BIT;
			cp->Tag.upper = 0;
			cp->host_addr.lower =
				(u32) (h->ioaccel_cmd_pool_dhandle +
					(i * sizeof(struct io_accel1_cmd)));
			cp->host_addr.upper = 0;
		}
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		u64 cfg_offset, cfg_base_addr_index;
		u32 bft2_offset, cfg_base_addr;
		int rc;

		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
			&cfg_base_addr_index, &cfg_offset);
		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
				4, h->ioaccel2_blockFetchTable);
		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
		BUILD_BUG_ON(offsetof(struct CfgTable,
				io_accel_request_size_offset) != 0xb8);
		h->ioaccel2_bft2_regs =
			remap_pci_mem(pci_resource_start(h->pdev,
					cfg_base_addr_index) +
					cfg_offset + bft2_offset,
					ARRAY_SIZE(bft2) *
					sizeof(*h->ioaccel2_bft2_regs));
		for (i = 0; i < ARRAY_SIZE(bft2); i++)
			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
}
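
/*
 * Illustrative sketch, not part of the driver logic: the arithmetic behind the
 * bft[] block-fetch table programmed above.  Per the comment in
 * hpsa_enter_performant_mode(), a command occupies 4 16-byte blocks of header
 * plus one 16-byte block per embedded SG entry, and each SG entry typically
 * maps up to one 4K page (so 1 SG entry -> 5 blocks/4K, 2 -> 6/8K,
 * 4 -> 8/16K, 6 -> 10/24K).  The bucket index looked up here is what gets
 * encoded into the low bits of the command address on submission so the
 * controller fetches only as many 16-byte blocks as the command needs; the
 * helper name is hypothetical and the exact bit encoding is not shown here.
 */
static void __attribute__((unused)) hpsa_example_block_fetch_size(
	struct ctlr_info *h, int nsg)
{
	/* 16-byte blocks needed by a command with nsg embedded SG entries */
	int blocks = nsg + 4;
	/* which of the 8 sizes written to BlockFetch[] covers this command */
	int bucket = h->blockFetchTable[nsg];

	(void)blocks;
	(void)bucket;
}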

static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/* Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
	h->ioaccel_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			&(h->ioaccel_cmd_pool_dhandle));

	h->ioaccel1_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel_cmd_pool == NULL) ||
		(h->ioaccel1_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
	return 0;

clean_up:
	if (h->ioaccel_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
	kfree(h->ioaccel1_blockFetchTable);
	return 1;
}

static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
{
	/* Allocate ioaccel2 mode command blocks and block fetch table */

	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&(h->ioaccel2_cmd_pool_dhandle));

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
		(h->ioaccel2_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	if (h->ioaccel2_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
	kfree(h->ioaccel2_blockFetchTable);
	return 1;
}
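
/*
 * Illustrative sketch, not part of the driver logic: why the two allocators
 * above insist (via BUILD_BUG_ON) that every ioaccel command structure is a
 * multiple of the 128-byte alignment.  The DMA pool returned by
 * pci_alloc_consistent() is at least page aligned, so when each element's
 * size is a multiple of IOACCEL1_COMMANDLIST_ALIGNMENT, every per-command bus
 * address keeps its 7 low bits clear and the hardware is free to use those
 * bits (see the comment in hpsa_alloc_ioaccel_cmd_and_bft()).  The helper
 * name is hypothetical and the function is never called.
 */
static void __attribute__((unused)) hpsa_example_ioaccel_alignment(
	struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->nr_cmds; i++) {
		dma_addr_t cmd_dma = h->ioaccel_cmd_pool_dhandle +
			i * sizeof(struct io_accel1_cmd);

		/* the 7 low bits stay clear for every command in the pool */
		BUG_ON(cmd_dma & (IOACCEL1_COMMANDLIST_ALIGNMENT - 1));
	}
}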

static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
					CFGTBL_Trans_use_short_tags;
	int i;

	if (hpsa_simple_mode)
		return;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_enable_directed_msix;
		if (hpsa_alloc_ioaccel_cmd_and_bft(h))
			goto clean_up;
	} else {
		if (trans_support & CFGTBL_Trans_io_accel2) {
			transMethod |= CFGTBL_Trans_io_accel2 |
					CFGTBL_Trans_enable_directed_msix;
			if (ioaccel2_alloc_cmds_and_bft(h))
				goto clean_up;
		}
	}

	/* TODO, check that this next line h->nreply_queues is correct */
	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
				&(h->reply_pool_dhandle));

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = &h->reply_pool[h->max_commands * i];
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->reply_pool == NULL)
		|| (h->blockFetchTable == NULL))
		goto clean_up;

	hpsa_enter_performant_mode(h, trans_support);
	return;

clean_up:
	if (h->reply_pool)
		pci_free_consistent(h->pdev, h->reply_pool_size,
			h->reply_pool, h->reply_pool_dhandle);
	kfree(h->blockFetchTable);
}

static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}

static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	unsigned long flags;
	int accel_cmds_out;

	do { /* wait for all outstanding commands to drain out */
		accel_cmds_out = 0;
		spin_lock_irqsave(&h->lock, flags);
		list_for_each_entry(c, &h->cmpQ, list)
			accel_cmds_out += is_accelerated_cmd(c);
		list_for_each_entry(c, &h->reqQ, list)
			accel_cmds_out += is_accelerated_cmd(c);
		spin_unlock_irqrestore(&h->lock, flags);
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}
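
/*
 * Illustrative sketch, not part of the driver logic: how the single
 * reply_pool allocation made in hpsa_put_ctlr_into_performant_mode() above is
 * carved up.  The pool holds nreply_queues consecutive rings of max_commands
 * 8-byte entries; ring i starts at reply_pool + max_commands * i on the CPU
 * side, and its bus address (reply_pool_dhandle plus the same byte offset) is
 * what hpsa_enter_performant_mode() writes into RepQAddr[i].  The helper name
 * is hypothetical and never called; types follow the driver's declarations.
 */
static void __attribute__((unused)) hpsa_example_reply_queue_layout(
	struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->nreply_queues; i++) {
		/* CPU-visible start of ring i */
		u64 *ring = &h->reply_pool[h->max_commands * i];
		/* matching bus address programmed into RepQAddr[i].lower */
		dma_addr_t ring_dma = h->reply_pool_dhandle +
			h->max_commands * sizeof(u64) * i;

		(void)ring;
		(void)ring_dma;
	}
}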

/*
 * This is it.  Register the PCI driver information for the cards we control.
 * The OS will call our registered routines when it finds one of our cards.
 */
static int __init hpsa_init(void)
{
	return pci_register_driver(&hpsa_pci_driver);
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
}

static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)

	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48 */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3 */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4 */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(Tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);
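
/*
 * Illustrative sketch, not part of the driver: the VERIFY_OFFSET pattern used
 * by verify_offsets() above can be applied to any structure whose layout must
 * match what the firmware expects, at zero runtime cost because BUILD_BUG_ON()
 * is evaluated at compile time.  The structure and offsets below are
 * hypothetical, and the whole block is compiled out.
 */
#if 0
struct example_fw_frame {
	u8	type;		/* 0x00 */
	u8	flags;		/* 0x01 */
	u16	length;		/* 0x02 */
	u32	tag;		/* 0x04 */
};

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct example_fw_frame, member) != offset)

static void __attribute__((unused)) verify_example_offsets(void)
{
	VERIFY_OFFSET(type, 0x00);
	VERIFY_OFFSET(flags, 0x01);
	VERIFY_OFFSET(length, 0x02);
	VERIFY_OFFSET(tag, 0x04);
}
#undef VERIFY_OFFSET
#endif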