/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
 */
#define HPSA_DRIVER_VERSION "2.0.2-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324a103C, "Smart Array P712m", &SA5_access},
	{0x324b103C, "Smart Array P711m", &SA5_access},
	{0x3350103C, "Smart Array", &SA5_access},
	{0x3351103C, "Smart Array", &SA5_access},
	{0x3352103C, "Smart Array", &SA5_access},
	{0x3353103C, "Smart Array", &SA5_access},
	{0x3354103C, "Smart Array", &SA5_access},
	{0x3355103C, "Smart Array", &SA5_access},
	{0x3356103C, "Smart Array", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static int number_of_controllers;

static struct list_head hpsa_ctlr_list = LIST_HEAD_INIT(hpsa_ctlr_list);
static spinlock_t lockup_detector_lock;
static struct task_struct *hpsa_lockup_detector;
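/*
 * Editorial note (not from the original source): the final real entry
 * in hpsa_pci_device_id above is a class-based catch-all.  It matches
 * any HP board whose PCI class is PCI_CLASS_STORAGE_RAID regardless of
 * subsystem IDs, pairing with the 0xFFFF103C "Unknown Smart Array"
 * entry in products[].  Per the hpsa_allow_any module parameter above,
 * such unrecognized boards are driven only when that parameter is set.
 */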
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
	int cmd_type);

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int *bucket_map);
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h);
static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
	void __iomem *vaddr, int wait_for_ready);
#define BOARD_NOT_READY 0
#define BOARD_READY 1

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev, HPSA "%d: LUN failure "
			"detected, action required\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: report LUN data "
			"changed, action required\n", h->ctlr);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the
	 * external target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, HPSA "%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, HPSA "%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}
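/*
 * The byte offsets used above follow SPC fixed-format sense data: the
 * sense key is carried in the low nibble of byte 2 and the ASC/ASCQ
 * pair lives in bytes 12 and 13, which is why unit attention is
 * detected at SenseInfo[2] and its reason code read from SenseInfo[12].
 */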
static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};
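/*
 * The 32-bit IDs in the two tables above (and in products[]) pack the
 * PCI subsystem device ID in the upper 16 bits and the subsystem
 * vendor ID in the lower 16.  For example, 0x324a103C is subsystem
 * device 0x324a from vendor 0x103C (HP); the 0x0E11 entries are older
 * Compaq-vendor boards.
 */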
static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"UNKNOWN"
};
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}
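/*
 * raid_level (along with lunid and unique_id below) is exported
 * through the SCSI midlayer's sdev_attrs hook, so for a device at
 * host:channel:target:lun it would typically be read as, e.g.
 * (illustrative paths/output, not taken from the original source):
 *
 *	cat /sys/class/scsi_device/0:0:0:0/device/raid_level
 *	RAID 5
 */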
static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	NULL,
};

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs		= hpsa_sdev_attrs,
	.shost_attrs		= hpsa_shost_attrs,
	.max_sectors		= 8192,
};
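/*
 * A brief orientation for the queueing functions below (an editorial
 * note): in "performant" mode the controller returns completions
 * through a reply ring (h->reply_pool), and a command's bus address
 * doubles as its tag.  set_performant_mode() sets bit 0 (pull model)
 * and puts the block fetch register number in bits 3-1; as a worked
 * example, if the blockFetchTable entry for a command's SG count were
 * 2, the tag would become busaddr | 1 | (2 << 1), i.e. busaddr | 0x5.
 * In "simple" mode the tag is used unmodified.
 */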
/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
	list_add_tail(&c->list, list);
}

static inline u32 next_command(struct ctlr_info *h)
{
	u32 a;

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h);

	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
		a = *(h->reply_pool_head); /* Next cmd in ring buffer */
		(h->reply_pool_head)++;
		h->commands_outstanding--;
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
		h->reply_pool_head = h->reply_pool;
		h->reply_pool_wraparound ^= 1;
	}
	return a;
}

/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant))
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	set_performant_mode(h, c);
	spin_lock_irqsave(&h->lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	start_io(h);
	spin_unlock_irqrestore(&h->lock, flags);
}

static inline void removeQ(struct CommandList *c)
{
	if (WARN_ON(list_empty(&c->list)))
		return;
	list_del_init(&c->list);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}
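/*
 * A note on the 8-byte CISS LUN addresses handled by the helpers
 * below: as hpsa_scsi_add_entry explains, byte 4 of the address is the
 * logical unit number for a non-zero lun of a multi-lun device, and
 * zero otherwise.  Illustratively, two luns behind one multi-lun
 * device might differ only in scsi3addr[4] (0x00 vs. 0x01); the second
 * is assigned the same bus/target as the first, with lun 1.
 */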
/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
			scsi_device_type(device->devtype), hostno,
			device->bus, device->target, device->lun);
	return 0;
}

/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}
/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}

#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}
static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */

	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes); scsi_scan_host will do it later the
	 * first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * future cmds to this device will get selection
			 * timeout as if the device was gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				" for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (sd != NULL)
		sdev->hostdata = sd;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}
static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
				GFP_KERNEL);
	if (!h->cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
						h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}

static void hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = HPSA_SG_CHAIN;
	chain_sg->Len = sizeof(*chain_sg) *
		(c->Header.SGTotal - h->max_cmd_sg_entries);
	temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
				PCI_DMA_TODEVICE);
	chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
	chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;
	union u64bit temp64;

	if (c->Header.SGTotal <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	temp64.val32.lower = chain_sg->Addr.lower;
	temp64.val32.upper = chain_sg->Addr.upper;
	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}

static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;

	unsigned char sense_key;
	unsigned char asc;	/* additional sense code */
	unsigned char ascq;	/* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
	h = cp->h;

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if (cp->Header.SGTotal > h->max_cmd_sg_entries)
		hpsa_unmap_sg_chain_block(h, cp);

	cmd->result = (DID_OK << 16); 		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
	cmd->result |= ei->ScsiStatus;
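
	/*
	 * The three assignments above pack the standard SCSI result word:
	 * host byte (DID_*) in bits 23-16, message byte in bits 15-8, and
	 * the SCSI status byte in bits 7-0.  For example, DID_NO_CONNECT
	 * (0x04) produces 0x00040000, while DID_OK with a check condition
	 * status yields 0x00000002.
	 */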
	/* copy the sense data whether we need to or not. */
	if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
		sense_data_size = SCSI_SENSE_BUFFERSIZE;
	else
		sense_data_size = sizeof(ei->SenseInfo);
	if (ei->SenseLen < sense_data_size)
		sense_data_size = ei->SenseLen;

	memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
	scsi_set_resid(cmd, ei->ResidualCnt);

	if (ei->CommandStatus == 0) {
		cmd->scsi_done(cmd);
		cmd_free(h, cp);
		return;
	}

	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		if (ei->ScsiStatus) {
			/* Get sense key */
			sense_key = 0xf & ei->SenseInfo[2];
			/* Get additional sense code */
			asc = ei->SenseInfo[12];
			/* Get additional sense code qualifier */
			ascq = ei->SenseInfo[13];
		}

		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			if (check_for_unit_attention(h, cp)) {
				cmd->result = DID_SOFT_ERROR << 16;
				break;
			}
			if (sense_key == ILLEGAL_REQUEST) {
				/*
				 * SCSI REPORT_LUNS is commonly unsupported on
				 * Smart Array.  Suppress noisy complaint.
				 */
				if (cp->Request.CDB[0] == REPORT_LUNS)
					break;

				/* If ASC/ASCQ indicate Logical Unit
				 * Not Supported condition,
				 */
				if ((asc == 0x25) && (ascq == 0x0)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition\n", cp);
					break;
				}
			}

			if (sense_key == NOT_READY) {
				/* If Sense is Not Ready, Logical Unit
				 * Not ready, Manual Intervention
				 * required
				 */
				if ((asc == 0x04) && (ascq == 0x03)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition: unit "
						"not ready, manual "
						"intervention required\n", cp);
					break;
				}
			}
			if (sense_key == ABORTED_COMMAND) {
				/* Aborted command is retryable */
				dev_warn(&h->pdev->dev, "cp %p "
					"has check condition: aborted command: "
					"ASC: 0x%x, ASCQ: 0x%x\n",
					cp, asc, ascq);
				cmd->result = DID_SOFT_ERROR << 16;
				break;
			}
			/* Must be some other type of check condition */
			dev_warn(&h->pdev->dev, "cp %p has check condition: "
					"unknown type: "
					"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
					"Returning result: 0x%x, "
					"cmd=[%02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x]\n",
					cp, sense_key, asc, ascq,
					cmd->result,
					cmd->cmnd[0], cmd->cmnd[1],
					cmd->cmnd[2], cmd->cmnd[3],
					cmd->cmnd[4], cmd->cmnd[5],
					cmd->cmnd[6], cmd->cmnd[7],
					cmd->cmnd[8], cmd->cmnd[9],
					cmd->cmnd[10], cmd->cmnd[11],
					cmd->cmnd[12], cmd->cmnd[13],
					cmd->cmnd[14], cmd->cmnd[15]);
			break;
		}


		/* Problem was not a check condition
		 * Pass it up to the upper layers...
		 */
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
		} else {  /* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);

			/* Ordinarily, this case should never happen,
			 * but there is a bug in some released firmware
			 * revisions that allows it to happen if, for
			 * example, a 4100 backplane loses power and
			 * the tape drive is in it.  We assume that
			 * it's a fatal error of some kind because we
			 * can't show that it wasn't.  We will make it
			 * look like selection timeout since that is
			 * the most common reason for this to occur,
			 * and it's severe enough.
			 */
			cmd->result = DID_NO_CONNECT << 16;
		}
		break;

	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev, "cp %p has"
			" completed with data overrun "
			"reported\n", cp);
		break;
	case CMD_INVALID: {
		/* print_bytes(cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response).  You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target. */
		cmd->result = DID_NO_CONNECT << 16;
		}
		break;
	case CMD_PROTOCOL_ERR:
		dev_warn(&h->pdev->dev, "cp %p has "
			"protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		cmd->result = DID_ABORT << 16;
		dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
			cp, ei->ScsiStatus);
		break;
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
		dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
			"abort\n", cp);
		break;
	case CMD_TIMEOUT:
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "Command unabortable\n");
		break;
	default:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
				cp, ei->CommandStatus);
	}
	cmd->scsi_done(cmd);
	cmd_free(h, cp);
}

static void hpsa_pci_unmap(struct pci_dev *pdev,
	struct CommandList *c, int sg_used, int data_direction)
{
	int i;
	union u64bit addr64;

	for (i = 0; i < sg_used; i++) {
		addr64.val32.lower = c->SG[i].Addr.lower;
		addr64.val32.upper = c->SG[i].Addr.upper;
		pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
			data_direction);
	}
}

static void hpsa_map_one(struct pci_dev *pdev,
		struct CommandList *cp,
		unsigned char *buf,
		size_t buflen,
		int data_direction)
{
	u64 addr64;

	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
		cp->Header.SGList = 0;
		cp->Header.SGTotal = 0;
		return;
	}

	addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
	cp->SG[0].Addr.lower =
		(u32) (addr64 & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Addr.upper =
		(u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Len = buflen;
	cp->Header.SGList = (u8) 1;	/* no. SGs contig in this cmd */
	cp->Header.SGTotal = (u16) 1;	/* total sgs in this cmd list */
}
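/*
 * The helpers below form the driver's synchronous command path: the
 * caller parks an on-stack struct completion in c->waiting,
 * enqueue_cmd_and_start_io() submits the command, and the completion
 * is signalled from the command-completion path (outside this
 * section), waking the sleeping caller.  This is the standard on-stack
 * completion pattern.
 */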
static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	enqueue_cmd_and_start_io(h, c);
	wait_for_completion(&wait);
}

static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	/* If controller lockup detected, fake a hardware error. */
	spin_lock_irqsave(&h->lock, flags);
	if (unlikely(h->lockup_detected)) {
		spin_unlock_irqrestore(&h->lock, flags);
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
	} else {
		spin_unlock_irqrestore(&h->lock, flags);
		hpsa_scsi_do_simple_cmd_core(h, c);
	}
}

static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction)
{
	int retry_count = 0;

	do {
		memset(c->err_info, 0, sizeof(*c->err_info));
		hpsa_scsi_do_simple_cmd_core(h, c);
		retry_count++;
	} while (check_for_unit_attention(h, c) && retry_count <= 3);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}
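/*
 * Note on the loop above: a command that keeps reporting unit
 * attention is retried up to three times (four attempts in all),
 * which lines up with the MAX_CMD_RETRIES limit defined near the top
 * of this file, though the constant itself is not used here.
 */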
static void hpsa_scsi_interpret_error(struct CommandList *cp)
{
	struct ErrorInfo *ei;
	struct device *d = &cp->h->pdev->dev;

	ei = cp->err_info;
	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		dev_warn(d, "cmd %p has completed with errors\n", cp);
		dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
				ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero.  "
			"(probably indicates selection timeout "
			"reported incorrectly due to a known "
			"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		dev_info(d, "UNDERRUN\n");
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(d, "cp %p has completed with data overrun\n", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		dev_warn(d, "cp %p is reported invalid (probably means "
			"target device no longer present)\n", cp);
		/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		}
		break;
	case CMD_PROTOCOL_ERR:
		dev_warn(d, "cp %p has protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		/* cmd->result = DID_ERROR << 16; */
		dev_warn(d, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		dev_warn(d, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		dev_warn(d, "cp %p was aborted\n", cp);
		break;
	case CMD_ABORT_FAILED:
		dev_warn(d, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
		break;
	case CMD_TIMEOUT:
		dev_warn(d, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		dev_warn(d, "Command unabortable\n");
		break;
	default:
		dev_warn(d, "cp %p returned unknown status %x\n", cp,
				ei->CommandStatus);
	}
}

static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
			unsigned char page, unsigned char *buf,
			unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) {			/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}
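/*
 * hpsa_scsi_do_inquiry() backs the discovery helpers that follow.
 * The INQUIRY page codes used in this file are 0x00 (standard inquiry
 * data, in hpsa_update_device_info), 0x83 (device identification VPD
 * page, in hpsa_get_device_id), and 0xC1 (a vendor-specific page from
 * which hpsa_get_raid_level reads the RAID level at byte 8).
 */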
static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) {			/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
	hpsa_scsi_do_simple_cmd_core(h, c);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
	kfree(buf);
	return;
}

/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int buflen)
{
	int rc;
	unsigned char *buf;

	if (buflen > 16)
		buflen = 16;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -1;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
	if (rc == 0)
		memcpy(device_id, &buf[8], buflen);
	kfree(buf);
	return rc != 0;
}

static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
		struct ReportLUNdata *buf, int bufsize,
		int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);
	if (c == NULL) {			/* trouble... */
		dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -1;
	}
	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD);
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf,
		int bufsize, int extended_response)
{
	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}

static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}
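/*
 * The replies returned by hpsa_scsi_do_report_luns() above begin with
 * a 4-byte big-endian LUNListLength followed by 8-byte LUN address
 * entries; hpsa_gather_lun_info() below divides be32_to_cpu of that
 * length by 8 to count entries.  For example, a reply describing three
 * LUNs carries a LUNListLength of 24.
 */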
static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

	unsigned char *inq_buff;
	unsigned char *obdr_sig;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff)
		goto bail_out;

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		/* Inquiry failed (msg printed already) */
		dev_err(&h->pdev->dev,
			"hpsa_update_device_info: inquiry failed\n");
		goto bail_out;
	}

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
		sizeof(this_device->device_id));

	if (this_device->devtype == TYPE_DISK &&
		is_logical_dev_addr_mode(scsi3addr))
		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
	else
		this_device->raid_level = RAID_UNKNOWN;

	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 in inquiry data.
		 */
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
					strncmp(obdr_sig, OBDR_TAPE_SIG,
						OBDR_SIG_LEN) == 0);
	}

	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return 1;
}

static unsigned char *ext_target_model[] = {
	"MSA2012",
	"MSA2024",
	"MSA2312",
	"MSA2324",
	"P2000 G3 SAS",
	NULL,
};

static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int i;

	for (i = 0; ext_target_model[i]; i++)
		if (strncmp(device->model, ext_target_model[i],
			strlen(ext_target_model[i])) == 0)
			return 1;
	return 0;
}

/* Helper function to assign bus, target, lun mapping of devices.
 * Puts non-external target logical volumes on bus 0, external target logical
 * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
	u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));

	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
		/* physical device, target and lun filled in later */
		if (is_hba_lunid(lunaddrbytes))
			hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
		else
			/* defer target, lun assignment for physical devices */
			hpsa_set_bus_target_lun(device, 2, -1, -1);
		return;
	}
	/* It's a logical device */
	if (is_ext_target(h, device)) {
		/* external target way, put logicals on bus 1
		 * and match target/lun numbers box
		 * reports, other smart array, bus 0, target 0, match lunid
		 */
		hpsa_set_bus_target_lun(device,
			1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
		return;
	}
	hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
}
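/*
 * Worked examples for figure_bus_target_lun() above: the HBA itself
 * (all-zero CISS address, so lunid 0) lands at bus 3, target 0, lun 0;
 * a plain logical volume with lunid 5 lands at bus 0, target 0, lun 5;
 * an external (MSA-type) logical volume takes its target from lunid
 * bits 29-16 and its lun from the low byte, and lands on bus 1.
 */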
*tmpdevice is the target we're adding, 1712 * this_device is a pointer into the current element of currentsd[] 1713 * that we're building up in update_scsi_devices(), below. 1714 * lunzerobits is a bitmap that tracks which targets already have a 1715 * lun 0 assigned. 1716 * Returns 1 if an enclosure was added, 0 if not. 1717 */ 1718 static int add_ext_target_dev(struct ctlr_info *h, 1719 struct hpsa_scsi_dev_t *tmpdevice, 1720 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes, 1721 unsigned long lunzerobits[], int *n_ext_target_devs) 1722 { 1723 unsigned char scsi3addr[8]; 1724 1725 if (test_bit(tmpdevice->target, lunzerobits)) 1726 return 0; /* There is already a lun 0 on this target. */ 1727 1728 if (!is_logical_dev_addr_mode(lunaddrbytes)) 1729 return 0; /* It's the logical targets that may lack lun 0. */ 1730 1731 if (!is_ext_target(h, tmpdevice)) 1732 return 0; /* Only external target devices have this problem. */ 1733 1734 if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */ 1735 return 0; 1736 1737 memset(scsi3addr, 0, 8); 1738 scsi3addr[3] = tmpdevice->target; 1739 if (is_hba_lunid(scsi3addr)) 1740 return 0; /* Don't add the RAID controller here. */ 1741 1742 if (is_scsi_rev_5(h)) 1743 return 0; /* p1210m doesn't need to do this. */ 1744 1745 if (*n_ext_target_devs >= MAX_EXT_TARGETS) { 1746 dev_warn(&h->pdev->dev, "Maximum number of external " 1747 "target devices exceeded. Check your hardware " 1748 "configuration."); 1749 return 0; 1750 } 1751 1752 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL)) 1753 return 0; 1754 (*n_ext_target_devs)++; 1755 hpsa_set_bus_target_lun(this_device, 1756 tmpdevice->bus, tmpdevice->target, 0); 1757 set_bit(tmpdevice->target, lunzerobits); 1758 return 1; 1759 } 1760 1761 /* 1762 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev, 1763 * logdev. The number of luns in physdev and logdev are returned in 1764 * *nphysicals and *nlogicals, respectively. 1765 * Returns 0 on success, -1 otherwise. 1766 */ 1767 static int hpsa_gather_lun_info(struct ctlr_info *h, 1768 int reportlunsize, 1769 struct ReportLUNdata *physdev, u32 *nphysicals, 1770 struct ReportLUNdata *logdev, u32 *nlogicals) 1771 { 1772 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) { 1773 dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); 1774 return -1; 1775 } 1776 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8; 1777 if (*nphysicals > HPSA_MAX_PHYS_LUN) { 1778 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded." 1779 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, 1780 *nphysicals - HPSA_MAX_PHYS_LUN); 1781 *nphysicals = HPSA_MAX_PHYS_LUN; 1782 } 1783 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) { 1784 dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); 1785 return -1; 1786 } 1787 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8; 1788 /* Reject Logicals in excess of our max capability. */ 1789 if (*nlogicals > HPSA_MAX_LUN) { 1790 dev_warn(&h->pdev->dev, 1791 "maximum logical LUNs (%d) exceeded. " 1792 "%d LUNs ignored.\n", HPSA_MAX_LUN, 1793 *nlogicals - HPSA_MAX_LUN); 1794 *nlogicals = HPSA_MAX_LUN; 1795 } 1796 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) { 1797 dev_warn(&h->pdev->dev, 1798 "maximum logical + physical LUNs (%d) exceeded. 
" 1799 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, 1800 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN); 1801 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals; 1802 } 1803 return 0; 1804 } 1805 1806 u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, 1807 int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list, 1808 struct ReportLUNdata *logdev_list) 1809 { 1810 /* Helper function, figure out where the LUN ID info is coming from 1811 * given index i, lists of physical and logical devices, where in 1812 * the list the raid controller is supposed to appear (first or last) 1813 */ 1814 1815 int logicals_start = nphysicals + (raid_ctlr_position == 0); 1816 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0); 1817 1818 if (i == raid_ctlr_position) 1819 return RAID_CTLR_LUNID; 1820 1821 if (i < logicals_start) 1822 return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0]; 1823 1824 if (i < last_device) 1825 return &logdev_list->LUN[i - nphysicals - 1826 (raid_ctlr_position == 0)][0]; 1827 BUG(); 1828 return NULL; 1829 } 1830 1831 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) 1832 { 1833 /* the idea here is we could get notified 1834 * that some devices have changed, so we do a report 1835 * physical luns and report logical luns cmd, and adjust 1836 * our list of devices accordingly. 1837 * 1838 * The scsi3addr's of devices won't change so long as the 1839 * adapter is not reset. That means we can rescan and 1840 * tell which devices we already know about, vs. new 1841 * devices, vs. disappearing devices. 1842 */ 1843 struct ReportLUNdata *physdev_list = NULL; 1844 struct ReportLUNdata *logdev_list = NULL; 1845 u32 nphysicals = 0; 1846 u32 nlogicals = 0; 1847 u32 ndev_allocated = 0; 1848 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; 1849 int ncurrent = 0; 1850 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8; 1851 int i, n_ext_target_devs, ndevs_to_allocate; 1852 int raid_ctlr_position; 1853 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS); 1854 1855 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); 1856 physdev_list = kzalloc(reportlunsize, GFP_KERNEL); 1857 logdev_list = kzalloc(reportlunsize, GFP_KERNEL); 1858 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); 1859 1860 if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) { 1861 dev_err(&h->pdev->dev, "out of memory\n"); 1862 goto out; 1863 } 1864 memset(lunzerobits, 0, sizeof(lunzerobits)); 1865 1866 if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals, 1867 logdev_list, &nlogicals)) 1868 goto out; 1869 1870 /* We might see up to the maximum number of logical and physical disks 1871 * plus external target devices, and a device for the local RAID 1872 * controller. 1873 */ 1874 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1; 1875 1876 /* Allocate the per device structures */ 1877 for (i = 0; i < ndevs_to_allocate; i++) { 1878 if (i >= HPSA_MAX_DEVICES) { 1879 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded." 
1880 " %d devices ignored.\n", HPSA_MAX_DEVICES, 1881 ndevs_to_allocate - HPSA_MAX_DEVICES); 1882 break; 1883 } 1884 1885 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL); 1886 if (!currentsd[i]) { 1887 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n", 1888 __FILE__, __LINE__); 1889 goto out; 1890 } 1891 ndev_allocated++; 1892 } 1893 1894 if (unlikely(is_scsi_rev_5(h))) 1895 raid_ctlr_position = 0; 1896 else 1897 raid_ctlr_position = nphysicals + nlogicals; 1898 1899 /* adjust our table of devices */ 1900 n_ext_target_devs = 0; 1901 for (i = 0; i < nphysicals + nlogicals + 1; i++) { 1902 u8 *lunaddrbytes, is_OBDR = 0; 1903 1904 /* Figure out where the LUN ID info is coming from */ 1905 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, 1906 i, nphysicals, nlogicals, physdev_list, logdev_list); 1907 /* skip masked physical devices. */ 1908 if (lunaddrbytes[3] & 0xC0 && 1909 i < nphysicals + (raid_ctlr_position == 0)) 1910 continue; 1911 1912 /* Get device type, vendor, model, device id */ 1913 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice, 1914 &is_OBDR)) 1915 continue; /* skip it if we can't talk to it. */ 1916 figure_bus_target_lun(h, lunaddrbytes, tmpdevice); 1917 this_device = currentsd[ncurrent]; 1918 1919 /* 1920 * For external target devices, we have to insert a LUN 0 which 1921 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there 1922 * is nonetheless an enclosure device there. We have to 1923 * present that otherwise linux won't find anything if 1924 * there is no lun 0. 1925 */ 1926 if (add_ext_target_dev(h, tmpdevice, this_device, 1927 lunaddrbytes, lunzerobits, 1928 &n_ext_target_devs)) { 1929 ncurrent++; 1930 this_device = currentsd[ncurrent]; 1931 } 1932 1933 *this_device = *tmpdevice; 1934 1935 switch (this_device->devtype) { 1936 case TYPE_ROM: 1937 /* We don't *really* support actual CD-ROM devices, 1938 * just "One Button Disaster Recovery" tape drive 1939 * which temporarily pretends to be a CD-ROM drive. 1940 * So we check that the device is really an OBDR tape 1941 * device by checking for "$DR-10" in bytes 43-48 of 1942 * the inquiry data. 1943 */ 1944 if (is_OBDR) 1945 ncurrent++; 1946 break; 1947 case TYPE_DISK: 1948 if (i < nphysicals) 1949 break; 1950 ncurrent++; 1951 break; 1952 case TYPE_TAPE: 1953 case TYPE_MEDIUM_CHANGER: 1954 ncurrent++; 1955 break; 1956 case TYPE_RAID: 1957 /* Only present the Smartarray HBA as a RAID controller. 1958 * If it's a RAID controller other than the HBA itself 1959 * (an external RAID controller, MSA500 or similar) 1960 * don't present it. 1961 */ 1962 if (!is_hba_lunid(lunaddrbytes)) 1963 break; 1964 ncurrent++; 1965 break; 1966 default: 1967 break; 1968 } 1969 if (ncurrent >= HPSA_MAX_DEVICES) 1970 break; 1971 } 1972 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); 1973 out: 1974 kfree(tmpdevice); 1975 for (i = 0; i < ndev_allocated; i++) 1976 kfree(currentsd[i]); 1977 kfree(currentsd); 1978 kfree(physdev_list); 1979 kfree(logdev_list); 1980 } 1981 1982 /* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci 1983 * dma mapping and fills in the scatter gather entries of the 1984 * hpsa command, cp. 
1985 */ 1986 static int hpsa_scatter_gather(struct ctlr_info *h, 1987 struct CommandList *cp, 1988 struct scsi_cmnd *cmd) 1989 { 1990 unsigned int len; 1991 struct scatterlist *sg; 1992 u64 addr64; 1993 int use_sg, i, sg_index, chained; 1994 struct SGDescriptor *curr_sg; 1995 1996 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); 1997 1998 use_sg = scsi_dma_map(cmd); 1999 if (use_sg < 0) 2000 return use_sg; 2001 2002 if (!use_sg) 2003 goto sglist_finished; 2004 2005 curr_sg = cp->SG; 2006 chained = 0; 2007 sg_index = 0; 2008 scsi_for_each_sg(cmd, sg, use_sg, i) { 2009 if (i == h->max_cmd_sg_entries - 1 && 2010 use_sg > h->max_cmd_sg_entries) { 2011 chained = 1; 2012 curr_sg = h->cmd_sg_list[cp->cmdindex]; 2013 sg_index = 0; 2014 } 2015 addr64 = (u64) sg_dma_address(sg); 2016 len = sg_dma_len(sg); 2017 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); 2018 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); 2019 curr_sg->Len = len; 2020 curr_sg->Ext = 0; /* we are not chaining */ 2021 curr_sg++; 2022 } 2023 2024 if (use_sg + chained > h->maxSG) 2025 h->maxSG = use_sg + chained; 2026 2027 if (chained) { 2028 cp->Header.SGList = h->max_cmd_sg_entries; 2029 cp->Header.SGTotal = (u16) (use_sg + 1); 2030 hpsa_map_sg_chain_block(h, cp); 2031 return 0; 2032 } 2033 2034 sglist_finished: 2035 2036 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */ 2037 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */ 2038 return 0; 2039 } 2040 2041 2042 static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, 2043 void (*done)(struct scsi_cmnd *)) 2044 { 2045 struct ctlr_info *h; 2046 struct hpsa_scsi_dev_t *dev; 2047 unsigned char scsi3addr[8]; 2048 struct CommandList *c; 2049 unsigned long flags; 2050 2051 /* Get the ptr to our adapter structure out of cmd->host. */ 2052 h = sdev_to_hba(cmd->device); 2053 dev = cmd->device->hostdata; 2054 if (!dev) { 2055 cmd->result = DID_NO_CONNECT << 16; 2056 done(cmd); 2057 return 0; 2058 } 2059 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); 2060 2061 spin_lock_irqsave(&h->lock, flags); 2062 if (unlikely(h->lockup_detected)) { 2063 spin_unlock_irqrestore(&h->lock, flags); 2064 cmd->result = DID_ERROR << 16; 2065 done(cmd); 2066 return 0; 2067 } 2068 /* Need a lock as this is being allocated from the pool */ 2069 c = cmd_alloc(h); 2070 spin_unlock_irqrestore(&h->lock, flags); 2071 if (c == NULL) { /* trouble... */ 2072 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); 2073 return SCSI_MLQUEUE_HOST_BUSY; 2074 } 2075 2076 /* Fill in the command list header */ 2077 2078 cmd->scsi_done = done; /* save this for use by completion code */ 2079 2080 /* save c in case we have to abort it */ 2081 cmd->host_scribble = (unsigned char *) c; 2082 2083 c->cmd_type = CMD_SCSI; 2084 c->scsi_cmd = cmd; 2085 c->Header.ReplyQueue = 0; /* unused in simple mode */ 2086 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); 2087 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT); 2088 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT; 2089 2090 /* Fill in the request block... 
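 *
 * (Worked example, values assumed: for a READ(10) issued by the
 * midlayer, cmd->cmnd[0] == 0x28 and cmd->cmd_len == 10, so CDBLen
 * becomes 10, the ten CDB bytes are copied through verbatim, and
 * sc_data_direction == DMA_FROM_DEVICE maps to XFER_READ below.)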
*/ 2091 2092 c->Request.Timeout = 0; 2093 memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); 2094 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); 2095 c->Request.CDBLen = cmd->cmd_len; 2096 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); 2097 c->Request.Type.Type = TYPE_CMD; 2098 c->Request.Type.Attribute = ATTR_SIMPLE; 2099 switch (cmd->sc_data_direction) { 2100 case DMA_TO_DEVICE: 2101 c->Request.Type.Direction = XFER_WRITE; 2102 break; 2103 case DMA_FROM_DEVICE: 2104 c->Request.Type.Direction = XFER_READ; 2105 break; 2106 case DMA_NONE: 2107 c->Request.Type.Direction = XFER_NONE; 2108 break; 2109 case DMA_BIDIRECTIONAL: 2110 /* This can happen if a buggy application does a scsi passthru 2111 * and sets both inlen and outlen to non-zero. ( see 2112 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) 2113 */ 2114 2115 c->Request.Type.Direction = XFER_RSVD; 2116 /* This is technically wrong, and hpsa controllers should 2117 * reject it with CMD_INVALID, which is the most correct 2118 * response, but non-fibre backends appear to let it 2119 * slide by, and give the same results as if this field 2120 * were set correctly. Either way is acceptable for 2121 * our purposes here. 2122 */ 2123 2124 break; 2125 2126 default: 2127 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 2128 cmd->sc_data_direction); 2129 BUG(); 2130 break; 2131 } 2132 2133 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ 2134 cmd_free(h, c); 2135 return SCSI_MLQUEUE_HOST_BUSY; 2136 } 2137 enqueue_cmd_and_start_io(h, c); 2138 /* the cmd'll come back via intr handler in complete_scsi_command() */ 2139 return 0; 2140 } 2141 2142 static DEF_SCSI_QCMD(hpsa_scsi_queue_command) 2143 2144 static void hpsa_scan_start(struct Scsi_Host *sh) 2145 { 2146 struct ctlr_info *h = shost_to_hba(sh); 2147 unsigned long flags; 2148 2149 /* wait until any scan already in progress is finished. */ 2150 while (1) { 2151 spin_lock_irqsave(&h->scan_lock, flags); 2152 if (h->scan_finished) 2153 break; 2154 spin_unlock_irqrestore(&h->scan_lock, flags); 2155 wait_event(h->scan_wait_queue, h->scan_finished); 2156 /* Note: We don't need to worry about a race between this 2157 * thread and driver unload because the midlayer will 2158 * have incremented the reference count, so unload won't 2159 * happen if we're in here. 2160 */ 2161 } 2162 h->scan_finished = 0; /* mark scan as in progress */ 2163 spin_unlock_irqrestore(&h->scan_lock, flags); 2164 2165 hpsa_update_scsi_devices(h, h->scsi_host->host_no); 2166 2167 spin_lock_irqsave(&h->scan_lock, flags); 2168 h->scan_finished = 1; /* mark scan as finished. 
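 * The wake_up_all() just below releases any thread parked in the
 * wait_event() loop at the top of hpsa_scan_start(); the midlayer
 * then polls hpsa_scan_finished() until it reports nonzero.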
*/ 2169 wake_up_all(&h->scan_wait_queue); 2170 spin_unlock_irqrestore(&h->scan_lock, flags); 2171 } 2172 2173 static int hpsa_scan_finished(struct Scsi_Host *sh, 2174 unsigned long elapsed_time) 2175 { 2176 struct ctlr_info *h = shost_to_hba(sh); 2177 unsigned long flags; 2178 int finished; 2179 2180 spin_lock_irqsave(&h->scan_lock, flags); 2181 finished = h->scan_finished; 2182 spin_unlock_irqrestore(&h->scan_lock, flags); 2183 return finished; 2184 } 2185 2186 static int hpsa_change_queue_depth(struct scsi_device *sdev, 2187 int qdepth, int reason) 2188 { 2189 struct ctlr_info *h = sdev_to_hba(sdev); 2190 2191 if (reason != SCSI_QDEPTH_DEFAULT) 2192 return -ENOTSUPP; 2193 2194 if (qdepth < 1) 2195 qdepth = 1; 2196 else 2197 if (qdepth > h->nr_cmds) 2198 qdepth = h->nr_cmds; 2199 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 2200 return sdev->queue_depth; 2201 } 2202 2203 static void hpsa_unregister_scsi(struct ctlr_info *h) 2204 { 2205 /* we are being forcibly unloaded, and may not refuse. */ 2206 scsi_remove_host(h->scsi_host); 2207 scsi_host_put(h->scsi_host); 2208 h->scsi_host = NULL; 2209 } 2210 2211 static int hpsa_register_scsi(struct ctlr_info *h) 2212 { 2213 struct Scsi_Host *sh; 2214 int error; 2215 2216 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); 2217 if (sh == NULL) 2218 goto fail; 2219 2220 sh->io_port = 0; 2221 sh->n_io_port = 0; 2222 sh->this_id = -1; 2223 sh->max_channel = 3; 2224 sh->max_cmd_len = MAX_COMMAND_SIZE; 2225 sh->max_lun = HPSA_MAX_LUN; 2226 sh->max_id = HPSA_MAX_LUN; 2227 sh->can_queue = h->nr_cmds; 2228 sh->cmd_per_lun = h->nr_cmds; 2229 sh->sg_tablesize = h->maxsgentries; 2230 h->scsi_host = sh; 2231 sh->hostdata[0] = (unsigned long) h; 2232 sh->irq = h->intr[h->intr_mode]; 2233 sh->unique_id = sh->irq; 2234 error = scsi_add_host(sh, &h->pdev->dev); 2235 if (error) 2236 goto fail_host_put; 2237 scsi_scan_host(sh); 2238 return 0; 2239 2240 fail_host_put: 2241 dev_err(&h->pdev->dev, "%s: scsi_add_host" 2242 " failed for controller %d\n", __func__, h->ctlr); 2243 scsi_host_put(sh); 2244 return error; 2245 fail: 2246 dev_err(&h->pdev->dev, "%s: scsi_host_alloc" 2247 " failed for controller %d\n", __func__, h->ctlr); 2248 return -ENOMEM; 2249 } 2250 2251 static int wait_for_device_to_become_ready(struct ctlr_info *h, 2252 unsigned char lunaddr[]) 2253 { 2254 int rc = 0; 2255 int count = 0; 2256 int waittime = 1; /* seconds */ 2257 struct CommandList *c; 2258 2259 c = cmd_special_alloc(h); 2260 if (!c) { 2261 dev_warn(&h->pdev->dev, "out of memory in " 2262 "wait_for_device_to_become_ready.\n"); 2263 return IO_ERROR; 2264 } 2265 2266 /* Send test unit ready until device ready, or give up. */ 2267 while (count < HPSA_TUR_RETRY_LIMIT) { 2268 2269 /* Wait for a bit. do this first, because if we send 2270 * the TUR right away, the reset will just abort it. 2271 */ 2272 msleep(1000 * waittime); 2273 count++; 2274 2275 /* Increase wait time with each try, up to a point. */ 2276 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) 2277 waittime = waittime * 2; 2278 2279 /* Send the Test Unit Ready */ 2280 fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD); 2281 hpsa_scsi_do_simple_cmd_core(h, c); 2282 /* no unmap needed here because no data xfer. 
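 *
 * (Backoff sketch, limits assumed: with an HPSA_TUR_RETRY_LIMIT of,
 * say, ten tries, the sleeps above run 1s, 2s, 4s, ... doubling until
 * capped at HPSA_MAX_WAIT_INTERVAL_SECS between TURs.)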
*/ 2283 2284 if (c->err_info->CommandStatus == CMD_SUCCESS) 2285 break; 2286 2287 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 2288 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && 2289 (c->err_info->SenseInfo[2] == NO_SENSE || 2290 c->err_info->SenseInfo[2] == UNIT_ATTENTION)) 2291 break; 2292 2293 dev_warn(&h->pdev->dev, "waiting %d secs " 2294 "for device to become ready.\n", waittime); 2295 rc = 1; /* device not ready. */ 2296 } 2297 2298 if (rc) 2299 dev_warn(&h->pdev->dev, "giving up on device.\n"); 2300 else 2301 dev_warn(&h->pdev->dev, "device is ready.\n"); 2302 2303 cmd_special_free(h, c); 2304 return rc; 2305 } 2306 2307 /* Need at least one of these error handlers to keep ../scsi/hosts.c from 2308 * complaining. Doing a host- or bus-reset can't do anything good here. 2309 */ 2310 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) 2311 { 2312 int rc; 2313 struct ctlr_info *h; 2314 struct hpsa_scsi_dev_t *dev; 2315 2316 /* find the controller to which the command to be aborted was sent */ 2317 h = sdev_to_hba(scsicmd->device); 2318 if (h == NULL) /* paranoia */ 2319 return FAILED; 2320 dev = scsicmd->device->hostdata; 2321 if (!dev) { 2322 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " 2323 "device lookup failed.\n"); 2324 return FAILED; 2325 } 2326 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n", 2327 h->scsi_host->host_no, dev->bus, dev->target, dev->lun); 2328 /* send a reset to the SCSI LUN which the command was sent to */ 2329 rc = hpsa_send_reset(h, dev->scsi3addr); 2330 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) 2331 return SUCCESS; 2332 2333 dev_warn(&h->pdev->dev, "resetting device failed.\n"); 2334 return FAILED; 2335 } 2336 2337 /* 2338 * For operations that cannot sleep, a command block is allocated at init, 2339 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track 2340 * which ones are free or in use. Lock must be held when calling this. 2341 * cmd_free() is the complement. 2342 */ 2343 static struct CommandList *cmd_alloc(struct ctlr_info *h) 2344 { 2345 struct CommandList *c; 2346 int i; 2347 union u64bit temp64; 2348 dma_addr_t cmd_dma_handle, err_dma_handle; 2349 2350 do { 2351 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); 2352 if (i == h->nr_cmds) 2353 return NULL; 2354 } while (test_and_set_bit 2355 (i & (BITS_PER_LONG - 1), 2356 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); 2357 c = h->cmd_pool + i; 2358 memset(c, 0, sizeof(*c)); 2359 cmd_dma_handle = h->cmd_pool_dhandle 2360 + i * sizeof(*c); 2361 c->err_info = h->errinfo_pool + i; 2362 memset(c->err_info, 0, sizeof(*c->err_info)); 2363 err_dma_handle = h->errinfo_pool_dhandle 2364 + i * sizeof(*c->err_info); 2365 h->nr_allocs++; 2366 2367 c->cmdindex = i; 2368 2369 INIT_LIST_HEAD(&c->list); 2370 c->busaddr = (u32) cmd_dma_handle; 2371 temp64.val = (u64) err_dma_handle; 2372 c->ErrDesc.Addr.lower = temp64.val32.lower; 2373 c->ErrDesc.Addr.upper = temp64.val32.upper; 2374 c->ErrDesc.Len = sizeof(*c->err_info); 2375 2376 c->h = h; 2377 return c; 2378 } 2379 2380 /* For operations that can wait for kmalloc to possibly sleep, 2381 * this routine can be called. Lock need not be held to call 2382 * cmd_special_alloc. cmd_special_free() is the complement. 
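 *
 * Typical usage, as in wait_for_device_to_become_ready() above
 * (a sketch, error handling trimmed):
 *
 *	c = cmd_special_alloc(h);
 *	if (!c)
 *		return -ENOMEM;
 *	fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
 *	hpsa_scsi_do_simple_cmd_core(h, c);
 *	cmd_special_free(h, c);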
2383 */ 2384 static struct CommandList *cmd_special_alloc(struct ctlr_info *h) 2385 { 2386 struct CommandList *c; 2387 union u64bit temp64; 2388 dma_addr_t cmd_dma_handle, err_dma_handle; 2389 2390 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle); 2391 if (c == NULL) 2392 return NULL; 2393 memset(c, 0, sizeof(*c)); 2394 2395 c->cmdindex = -1; 2396 2397 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info), 2398 &err_dma_handle); 2399 2400 if (c->err_info == NULL) { 2401 pci_free_consistent(h->pdev, 2402 sizeof(*c), c, cmd_dma_handle); 2403 return NULL; 2404 } 2405 memset(c->err_info, 0, sizeof(*c->err_info)); 2406 2407 INIT_LIST_HEAD(&c->list); 2408 c->busaddr = (u32) cmd_dma_handle; 2409 temp64.val = (u64) err_dma_handle; 2410 c->ErrDesc.Addr.lower = temp64.val32.lower; 2411 c->ErrDesc.Addr.upper = temp64.val32.upper; 2412 c->ErrDesc.Len = sizeof(*c->err_info); 2413 2414 c->h = h; 2415 return c; 2416 } 2417 2418 static void cmd_free(struct ctlr_info *h, struct CommandList *c) 2419 { 2420 int i; 2421 2422 i = c - h->cmd_pool; 2423 clear_bit(i & (BITS_PER_LONG - 1), 2424 h->cmd_pool_bits + (i / BITS_PER_LONG)); 2425 h->nr_frees++; 2426 } 2427 2428 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c) 2429 { 2430 union u64bit temp64; 2431 2432 temp64.val32.lower = c->ErrDesc.Addr.lower; 2433 temp64.val32.upper = c->ErrDesc.Addr.upper; 2434 pci_free_consistent(h->pdev, sizeof(*c->err_info), 2435 c->err_info, (dma_addr_t) temp64.val); 2436 pci_free_consistent(h->pdev, sizeof(*c), 2437 c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK)); 2438 } 2439 2440 #ifdef CONFIG_COMPAT 2441 2442 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) 2443 { 2444 IOCTL32_Command_struct __user *arg32 = 2445 (IOCTL32_Command_struct __user *) arg; 2446 IOCTL_Command_struct arg64; 2447 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); 2448 int err; 2449 u32 cp; 2450 2451 memset(&arg64, 0, sizeof(arg64)); 2452 err = 0; 2453 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 2454 sizeof(arg64.LUN_info)); 2455 err |= copy_from_user(&arg64.Request, &arg32->Request, 2456 sizeof(arg64.Request)); 2457 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 2458 sizeof(arg64.error_info)); 2459 err |= get_user(arg64.buf_size, &arg32->buf_size); 2460 err |= get_user(cp, &arg32->buf); 2461 arg64.buf = compat_ptr(cp); 2462 err |= copy_to_user(p, &arg64, sizeof(arg64)); 2463 2464 if (err) 2465 return -EFAULT; 2466 2467 err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p); 2468 if (err) 2469 return err; 2470 err |= copy_in_user(&arg32->error_info, &p->error_info, 2471 sizeof(arg32->error_info)); 2472 if (err) 2473 return -EFAULT; 2474 return err; 2475 } 2476 2477 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, 2478 int cmd, void *arg) 2479 { 2480 BIG_IOCTL32_Command_struct __user *arg32 = 2481 (BIG_IOCTL32_Command_struct __user *) arg; 2482 BIG_IOCTL_Command_struct arg64; 2483 BIG_IOCTL_Command_struct __user *p = 2484 compat_alloc_user_space(sizeof(arg64)); 2485 int err; 2486 u32 cp; 2487 2488 memset(&arg64, 0, sizeof(arg64)); 2489 err = 0; 2490 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 2491 sizeof(arg64.LUN_info)); 2492 err |= copy_from_user(&arg64.Request, &arg32->Request, 2493 sizeof(arg64.Request)); 2494 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 2495 sizeof(arg64.error_info)); 2496 err |= get_user(arg64.buf_size, &arg32->buf_size); 2497 err |= get_user(arg64.malloc_size, 
&arg32->malloc_size); 2498 err |= get_user(cp, &arg32->buf); 2499 arg64.buf = compat_ptr(cp); 2500 err |= copy_to_user(p, &arg64, sizeof(arg64)); 2501 2502 if (err) 2503 return -EFAULT; 2504 2505 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p); 2506 if (err) 2507 return err; 2508 err |= copy_in_user(&arg32->error_info, &p->error_info, 2509 sizeof(arg32->error_info)); 2510 if (err) 2511 return -EFAULT; 2512 return err; 2513 } 2514 2515 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg) 2516 { 2517 switch (cmd) { 2518 case CCISS_GETPCIINFO: 2519 case CCISS_GETINTINFO: 2520 case CCISS_SETINTINFO: 2521 case CCISS_GETNODENAME: 2522 case CCISS_SETNODENAME: 2523 case CCISS_GETHEARTBEAT: 2524 case CCISS_GETBUSTYPES: 2525 case CCISS_GETFIRMVER: 2526 case CCISS_GETDRIVVER: 2527 case CCISS_REVALIDVOLS: 2528 case CCISS_DEREGDISK: 2529 case CCISS_REGNEWDISK: 2530 case CCISS_REGNEWD: 2531 case CCISS_RESCANDISK: 2532 case CCISS_GETLUNINFO: 2533 return hpsa_ioctl(dev, cmd, arg); 2534 2535 case CCISS_PASSTHRU32: 2536 return hpsa_ioctl32_passthru(dev, cmd, arg); 2537 case CCISS_BIG_PASSTHRU32: 2538 return hpsa_ioctl32_big_passthru(dev, cmd, arg); 2539 2540 default: 2541 return -ENOIOCTLCMD; 2542 } 2543 } 2544 #endif 2545 2546 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) 2547 { 2548 struct hpsa_pci_info pciinfo; 2549 2550 if (!argp) 2551 return -EINVAL; 2552 pciinfo.domain = pci_domain_nr(h->pdev->bus); 2553 pciinfo.bus = h->pdev->bus->number; 2554 pciinfo.dev_fn = h->pdev->devfn; 2555 pciinfo.board_id = h->board_id; 2556 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo))) 2557 return -EFAULT; 2558 return 0; 2559 } 2560 2561 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) 2562 { 2563 DriverVer_type DriverVer; 2564 unsigned char vmaj, vmin, vsubmin; 2565 int rc; 2566 2567 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu", 2568 &vmaj, &vmin, &vsubmin); 2569 if (rc != 3) { 2570 dev_info(&h->pdev->dev, "driver version string '%s' " 2571 "unrecognized.", HPSA_DRIVER_VERSION); 2572 vmaj = 0; 2573 vmin = 0; 2574 vsubmin = 0; 2575 } 2576 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin; 2577 if (!argp) 2578 return -EINVAL; 2579 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type))) 2580 return -EFAULT; 2581 return 0; 2582 } 2583 2584 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) 2585 { 2586 IOCTL_Command_struct iocommand; 2587 struct CommandList *c; 2588 char *buff = NULL; 2589 union u64bit temp64; 2590 2591 if (!argp) 2592 return -EINVAL; 2593 if (!capable(CAP_SYS_RAWIO)) 2594 return -EPERM; 2595 if (copy_from_user(&iocommand, argp, sizeof(iocommand))) 2596 return -EFAULT; 2597 if ((iocommand.buf_size < 1) && 2598 (iocommand.Request.Type.Direction != XFER_NONE)) { 2599 return -EINVAL; 2600 } 2601 if (iocommand.buf_size > 0) { 2602 buff = kmalloc(iocommand.buf_size, GFP_KERNEL); 2603 if (buff == NULL) 2604 return -EFAULT; 2605 if (iocommand.Request.Type.Direction == XFER_WRITE) { 2606 /* Copy the data into the buffer we created */ 2607 if (copy_from_user(buff, iocommand.buf, 2608 iocommand.buf_size)) { 2609 kfree(buff); 2610 return -EFAULT; 2611 } 2612 } else { 2613 memset(buff, 0, iocommand.buf_size); 2614 } 2615 } 2616 c = cmd_special_alloc(h); 2617 if (c == NULL) { 2618 kfree(buff); 2619 return -ENOMEM; 2620 } 2621 /* Fill in the command type */ 2622 c->cmd_type = CMD_IOCTL_PEND; 2623 /* Fill in Command Header */ 2624 c->Header.ReplyQueue = 0; /* unused in simple mode */ 2625 if (iocommand.buf_size > 0) { 
/* buffer to fill */ 2626 c->Header.SGList = 1; 2627 c->Header.SGTotal = 1; 2628 } else { /* no buffers to fill */ 2629 c->Header.SGList = 0; 2630 c->Header.SGTotal = 0; 2631 } 2632 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); 2633 /* use the kernel address the cmd block for tag */ 2634 c->Header.Tag.lower = c->busaddr; 2635 2636 /* Fill in Request block */ 2637 memcpy(&c->Request, &iocommand.Request, 2638 sizeof(c->Request)); 2639 2640 /* Fill in the scatter gather information */ 2641 if (iocommand.buf_size > 0) { 2642 temp64.val = pci_map_single(h->pdev, buff, 2643 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); 2644 c->SG[0].Addr.lower = temp64.val32.lower; 2645 c->SG[0].Addr.upper = temp64.val32.upper; 2646 c->SG[0].Len = iocommand.buf_size; 2647 c->SG[0].Ext = 0; /* we are not chaining*/ 2648 } 2649 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); 2650 if (iocommand.buf_size > 0) 2651 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); 2652 check_ioctl_unit_attention(h, c); 2653 2654 /* Copy the error information out */ 2655 memcpy(&iocommand.error_info, c->err_info, 2656 sizeof(iocommand.error_info)); 2657 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) { 2658 kfree(buff); 2659 cmd_special_free(h, c); 2660 return -EFAULT; 2661 } 2662 if (iocommand.Request.Type.Direction == XFER_READ && 2663 iocommand.buf_size > 0) { 2664 /* Copy the data out of the buffer we created */ 2665 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { 2666 kfree(buff); 2667 cmd_special_free(h, c); 2668 return -EFAULT; 2669 } 2670 } 2671 kfree(buff); 2672 cmd_special_free(h, c); 2673 return 0; 2674 } 2675 2676 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) 2677 { 2678 BIG_IOCTL_Command_struct *ioc; 2679 struct CommandList *c; 2680 unsigned char **buff = NULL; 2681 int *buff_size = NULL; 2682 union u64bit temp64; 2683 BYTE sg_used = 0; 2684 int status = 0; 2685 int i; 2686 u32 left; 2687 u32 sz; 2688 BYTE __user *data_ptr; 2689 2690 if (!argp) 2691 return -EINVAL; 2692 if (!capable(CAP_SYS_RAWIO)) 2693 return -EPERM; 2694 ioc = (BIG_IOCTL_Command_struct *) 2695 kmalloc(sizeof(*ioc), GFP_KERNEL); 2696 if (!ioc) { 2697 status = -ENOMEM; 2698 goto cleanup1; 2699 } 2700 if (copy_from_user(ioc, argp, sizeof(*ioc))) { 2701 status = -EFAULT; 2702 goto cleanup1; 2703 } 2704 if ((ioc->buf_size < 1) && 2705 (ioc->Request.Type.Direction != XFER_NONE)) { 2706 status = -EINVAL; 2707 goto cleanup1; 2708 } 2709 /* Check kmalloc limits using all SGs */ 2710 if (ioc->malloc_size > MAX_KMALLOC_SIZE) { 2711 status = -EINVAL; 2712 goto cleanup1; 2713 } 2714 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) { 2715 status = -EINVAL; 2716 goto cleanup1; 2717 } 2718 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL); 2719 if (!buff) { 2720 status = -ENOMEM; 2721 goto cleanup1; 2722 } 2723 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL); 2724 if (!buff_size) { 2725 status = -ENOMEM; 2726 goto cleanup1; 2727 } 2728 left = ioc->buf_size; 2729 data_ptr = ioc->buf; 2730 while (left) { 2731 sz = (left > ioc->malloc_size) ? 
ioc->malloc_size : left; 2732 buff_size[sg_used] = sz; 2733 buff[sg_used] = kmalloc(sz, GFP_KERNEL); 2734 if (buff[sg_used] == NULL) { 2735 status = -ENOMEM; 2736 goto cleanup1; 2737 } 2738 if (ioc->Request.Type.Direction == XFER_WRITE) { 2739 if (copy_from_user(buff[sg_used], data_ptr, sz)) { 2740 status = -ENOMEM; 2741 goto cleanup1; 2742 } 2743 } else 2744 memset(buff[sg_used], 0, sz); 2745 left -= sz; 2746 data_ptr += sz; 2747 sg_used++; 2748 } 2749 c = cmd_special_alloc(h); 2750 if (c == NULL) { 2751 status = -ENOMEM; 2752 goto cleanup1; 2753 } 2754 c->cmd_type = CMD_IOCTL_PEND; 2755 c->Header.ReplyQueue = 0; 2756 c->Header.SGList = c->Header.SGTotal = sg_used; 2757 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); 2758 c->Header.Tag.lower = c->busaddr; 2759 memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); 2760 if (ioc->buf_size > 0) { 2761 int i; 2762 for (i = 0; i < sg_used; i++) { 2763 temp64.val = pci_map_single(h->pdev, buff[i], 2764 buff_size[i], PCI_DMA_BIDIRECTIONAL); 2765 c->SG[i].Addr.lower = temp64.val32.lower; 2766 c->SG[i].Addr.upper = temp64.val32.upper; 2767 c->SG[i].Len = buff_size[i]; 2768 /* we are not chaining */ 2769 c->SG[i].Ext = 0; 2770 } 2771 } 2772 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); 2773 if (sg_used) 2774 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); 2775 check_ioctl_unit_attention(h, c); 2776 /* Copy the error information out */ 2777 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); 2778 if (copy_to_user(argp, ioc, sizeof(*ioc))) { 2779 cmd_special_free(h, c); 2780 status = -EFAULT; 2781 goto cleanup1; 2782 } 2783 if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) { 2784 /* Copy the data out of the buffer we created */ 2785 BYTE __user *ptr = ioc->buf; 2786 for (i = 0; i < sg_used; i++) { 2787 if (copy_to_user(ptr, buff[i], buff_size[i])) { 2788 cmd_special_free(h, c); 2789 status = -EFAULT; 2790 goto cleanup1; 2791 } 2792 ptr += buff_size[i]; 2793 } 2794 } 2795 cmd_special_free(h, c); 2796 status = 0; 2797 cleanup1: 2798 if (buff) { 2799 for (i = 0; i < sg_used; i++) 2800 kfree(buff[i]); 2801 kfree(buff); 2802 } 2803 kfree(buff_size); 2804 kfree(ioc); 2805 return status; 2806 } 2807 2808 static void check_ioctl_unit_attention(struct ctlr_info *h, 2809 struct CommandList *c) 2810 { 2811 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 2812 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) 2813 (void) check_for_unit_attention(h, c); 2814 } 2815 /* 2816 * ioctl 2817 */ 2818 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg) 2819 { 2820 struct ctlr_info *h; 2821 void __user *argp = (void __user *)arg; 2822 2823 h = sdev_to_hba(dev); 2824 2825 switch (cmd) { 2826 case CCISS_DEREGDISK: 2827 case CCISS_REGNEWDISK: 2828 case CCISS_REGNEWD: 2829 hpsa_scan_start(h->scsi_host); 2830 return 0; 2831 case CCISS_GETPCIINFO: 2832 return hpsa_getpciinfo_ioctl(h, argp); 2833 case CCISS_GETDRIVVER: 2834 return hpsa_getdrivver_ioctl(h, argp); 2835 case CCISS_PASSTHRU: 2836 return hpsa_passthru_ioctl(h, argp); 2837 case CCISS_BIG_PASSTHRU: 2838 return hpsa_big_passthru_ioctl(h, argp); 2839 default: 2840 return -ENOTTY; 2841 } 2842 } 2843 2844 static int __devinit hpsa_send_host_reset(struct ctlr_info *h, 2845 unsigned char *scsi3addr, u8 reset_type) 2846 { 2847 struct CommandList *c; 2848 2849 c = cmd_alloc(h); 2850 if (!c) 2851 return -ENOMEM; 2852 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, 2853 RAID_CTLR_LUNID, TYPE_MSG); 2854 c->Request.CDB[1] = reset_type; /* 
fill_cmd defaults to target reset */
	c->waiting = NULL;
	enqueue_cmd_and_start_io(h, c);
	/* Don't wait for completion, the reset won't complete.  Don't free
	 * the command either.  This is the last command we will send before
	 * re-initializing everything, so it doesn't matter and won't leak.
	 */
	return 0;
}

static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
	int cmd_type)
{
	int pci_dir = XFER_NONE;

	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	if (buff != NULL && size > 0) {
		c->Header.SGList = 1;
		c->Header.SGTotal = 1;
	} else {
		c->Header.SGList = 0;
		c->Header.SGTotal = 0;
	}
	c->Header.Tag.lower = c->busaddr;
	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);

	c->Request.Type.Type = cmd_type;
	if (cmd_type == TYPE_CMD) {
		switch (cmd) {
		case HPSA_INQUIRY:
			/* are we trying to read a vital product page */
			if (page_code != 0) {
				c->Request.CDB[1] = 0x01;
				c->Request.CDB[2] = page_code;
			}
			c->Request.CDBLen = 6;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_INQUIRY;
			c->Request.CDB[4] = size & 0xFF;
			break;
		case HPSA_REPORT_LOG:
		case HPSA_REPORT_PHYS:
			/* Talking to controller so it's a physical command
			   mode = 00 target = 0.  Nothing to write.
			 */
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case HPSA_CACHE_FLUSH:
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
			c->Request.CDB[7] = (size >> 8) & 0xFF;
			c->Request.CDB[8] = size & 0xFF;
			break;
		case TEST_UNIT_READY:
			c->Request.CDBLen = 6;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_NONE;
			c->Request.Timeout = 0;
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n",
				cmd);
			BUG();
			return;
		}
	} else if (cmd_type == TYPE_MSG) {
		switch (cmd) {

		case HPSA_DEVICE_RESET_MSG:
			c->Request.CDBLen = 16;
			c->Request.Type.Type = 1; /* It is a MSG not a CMD */
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_NONE;
			c->Request.Timeout = 0; /* Don't time out */
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] = cmd;
			c->Request.CDB[1] = 0x03;  /* Reset target above */
			/* If bytes 4-7 are zero, it means reset the */
			/* LunID device */
			c->Request.CDB[4] = 0x00;
			c->Request.CDB[5] = 0x00;
			c->Request.CDB[6] = 0x00;
			c->Request.CDB[7] = 0x00;
			break;

		default:
			dev_warn(&h->pdev->dev, "unknown message type %d\n",
				cmd);
			BUG();
		}
	} else {
		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
		BUG();
	}

	switch (c->Request.Type.Direction) {
	case XFER_READ:
pci_dir = PCI_DMA_FROMDEVICE; 2967 break; 2968 case XFER_WRITE: 2969 pci_dir = PCI_DMA_TODEVICE; 2970 break; 2971 case XFER_NONE: 2972 pci_dir = PCI_DMA_NONE; 2973 break; 2974 default: 2975 pci_dir = PCI_DMA_BIDIRECTIONAL; 2976 } 2977 2978 hpsa_map_one(h->pdev, c, buff, size, pci_dir); 2979 2980 return; 2981 } 2982 2983 /* 2984 * Map (physical) PCI mem into (virtual) kernel space 2985 */ 2986 static void __iomem *remap_pci_mem(ulong base, ulong size) 2987 { 2988 ulong page_base = ((ulong) base) & PAGE_MASK; 2989 ulong page_offs = ((ulong) base) - page_base; 2990 void __iomem *page_remapped = ioremap(page_base, page_offs + size); 2991 2992 return page_remapped ? (page_remapped + page_offs) : NULL; 2993 } 2994 2995 /* Takes cmds off the submission queue and sends them to the hardware, 2996 * then puts them on the queue of cmds waiting for completion. 2997 */ 2998 static void start_io(struct ctlr_info *h) 2999 { 3000 struct CommandList *c; 3001 3002 while (!list_empty(&h->reqQ)) { 3003 c = list_entry(h->reqQ.next, struct CommandList, list); 3004 /* can't do anything if fifo is full */ 3005 if ((h->access.fifo_full(h))) { 3006 dev_warn(&h->pdev->dev, "fifo full\n"); 3007 break; 3008 } 3009 3010 /* Get the first entry from the Request Q */ 3011 removeQ(c); 3012 h->Qdepth--; 3013 3014 /* Tell the controller execute command */ 3015 h->access.submit_command(h, c); 3016 3017 /* Put job onto the completed Q */ 3018 addQ(&h->cmpQ, c); 3019 } 3020 } 3021 3022 static inline unsigned long get_next_completion(struct ctlr_info *h) 3023 { 3024 return h->access.command_completed(h); 3025 } 3026 3027 static inline bool interrupt_pending(struct ctlr_info *h) 3028 { 3029 return h->access.intr_pending(h); 3030 } 3031 3032 static inline long interrupt_not_for_us(struct ctlr_info *h) 3033 { 3034 return (h->access.intr_pending(h) == 0) || 3035 (h->interrupts_enabled == 0); 3036 } 3037 3038 static inline int bad_tag(struct ctlr_info *h, u32 tag_index, 3039 u32 raw_tag) 3040 { 3041 if (unlikely(tag_index >= h->nr_cmds)) { 3042 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); 3043 return 1; 3044 } 3045 return 0; 3046 } 3047 3048 static inline void finish_cmd(struct CommandList *c, u32 raw_tag) 3049 { 3050 removeQ(c); 3051 if (likely(c->cmd_type == CMD_SCSI)) 3052 complete_scsi_command(c); 3053 else if (c->cmd_type == CMD_IOCTL_PEND) 3054 complete(c->waiting); 3055 } 3056 3057 static inline u32 hpsa_tag_contains_index(u32 tag) 3058 { 3059 return tag & DIRECT_LOOKUP_BIT; 3060 } 3061 3062 static inline u32 hpsa_tag_to_index(u32 tag) 3063 { 3064 return tag >> DIRECT_LOOKUP_SHIFT; 3065 } 3066 3067 3068 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag) 3069 { 3070 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1) 3071 #define HPSA_SIMPLE_ERROR_BITS 0x03 3072 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) 3073 return tag & ~HPSA_SIMPLE_ERROR_BITS; 3074 return tag & ~HPSA_PERF_ERROR_BITS; 3075 } 3076 3077 /* process completion of an indexed ("direct lookup") command */ 3078 static inline u32 process_indexed_cmd(struct ctlr_info *h, 3079 u32 raw_tag) 3080 { 3081 u32 tag_index; 3082 struct CommandList *c; 3083 3084 tag_index = hpsa_tag_to_index(raw_tag); 3085 if (bad_tag(h, tag_index, raw_tag)) 3086 return next_command(h); 3087 c = h->cmd_pool + tag_index; 3088 finish_cmd(c, raw_tag); 3089 return next_command(h); 3090 } 3091 3092 /* process completion of a non-indexed command */ 3093 static inline u32 process_nonindexed_cmd(struct ctlr_info *h, 3094 u32 raw_tag) 
3095 { 3096 u32 tag; 3097 struct CommandList *c = NULL; 3098 3099 tag = hpsa_tag_discard_error_bits(h, raw_tag); 3100 list_for_each_entry(c, &h->cmpQ, list) { 3101 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) { 3102 finish_cmd(c, raw_tag); 3103 return next_command(h); 3104 } 3105 } 3106 bad_tag(h, h->nr_cmds + 1, raw_tag); 3107 return next_command(h); 3108 } 3109 3110 /* Some controllers, like p400, will give us one interrupt 3111 * after a soft reset, even if we turned interrupts off. 3112 * Only need to check for this in the hpsa_xxx_discard_completions 3113 * functions. 3114 */ 3115 static int ignore_bogus_interrupt(struct ctlr_info *h) 3116 { 3117 if (likely(!reset_devices)) 3118 return 0; 3119 3120 if (likely(h->interrupts_enabled)) 3121 return 0; 3122 3123 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " 3124 "(known firmware bug.) Ignoring.\n"); 3125 3126 return 1; 3127 } 3128 3129 static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id) 3130 { 3131 struct ctlr_info *h = dev_id; 3132 unsigned long flags; 3133 u32 raw_tag; 3134 3135 if (ignore_bogus_interrupt(h)) 3136 return IRQ_NONE; 3137 3138 if (interrupt_not_for_us(h)) 3139 return IRQ_NONE; 3140 spin_lock_irqsave(&h->lock, flags); 3141 h->last_intr_timestamp = get_jiffies_64(); 3142 while (interrupt_pending(h)) { 3143 raw_tag = get_next_completion(h); 3144 while (raw_tag != FIFO_EMPTY) 3145 raw_tag = next_command(h); 3146 } 3147 spin_unlock_irqrestore(&h->lock, flags); 3148 return IRQ_HANDLED; 3149 } 3150 3151 static irqreturn_t hpsa_msix_discard_completions(int irq, void *dev_id) 3152 { 3153 struct ctlr_info *h = dev_id; 3154 unsigned long flags; 3155 u32 raw_tag; 3156 3157 if (ignore_bogus_interrupt(h)) 3158 return IRQ_NONE; 3159 3160 spin_lock_irqsave(&h->lock, flags); 3161 h->last_intr_timestamp = get_jiffies_64(); 3162 raw_tag = get_next_completion(h); 3163 while (raw_tag != FIFO_EMPTY) 3164 raw_tag = next_command(h); 3165 spin_unlock_irqrestore(&h->lock, flags); 3166 return IRQ_HANDLED; 3167 } 3168 3169 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id) 3170 { 3171 struct ctlr_info *h = dev_id; 3172 unsigned long flags; 3173 u32 raw_tag; 3174 3175 if (interrupt_not_for_us(h)) 3176 return IRQ_NONE; 3177 spin_lock_irqsave(&h->lock, flags); 3178 h->last_intr_timestamp = get_jiffies_64(); 3179 while (interrupt_pending(h)) { 3180 raw_tag = get_next_completion(h); 3181 while (raw_tag != FIFO_EMPTY) { 3182 if (hpsa_tag_contains_index(raw_tag)) 3183 raw_tag = process_indexed_cmd(h, raw_tag); 3184 else 3185 raw_tag = process_nonindexed_cmd(h, raw_tag); 3186 } 3187 } 3188 spin_unlock_irqrestore(&h->lock, flags); 3189 return IRQ_HANDLED; 3190 } 3191 3192 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id) 3193 { 3194 struct ctlr_info *h = dev_id; 3195 unsigned long flags; 3196 u32 raw_tag; 3197 3198 spin_lock_irqsave(&h->lock, flags); 3199 h->last_intr_timestamp = get_jiffies_64(); 3200 raw_tag = get_next_completion(h); 3201 while (raw_tag != FIFO_EMPTY) { 3202 if (hpsa_tag_contains_index(raw_tag)) 3203 raw_tag = process_indexed_cmd(h, raw_tag); 3204 else 3205 raw_tag = process_nonindexed_cmd(h, raw_tag); 3206 } 3207 spin_unlock_irqrestore(&h->lock, flags); 3208 return IRQ_HANDLED; 3209 } 3210 3211 /* Send a message CDB to the firmware. Careful, this only works 3212 * in simple mode, not performant mode due to the tag lookup. 3213 * We only ever use this immediately after a controller reset. 
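 *
 * (Usage sketch: the hpsa_noop() wrapper defined below expands to
 * hpsa_message(pdev, 3, 0), a no-op message that can be used to
 * verify the controller is answering at all after the reset.)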
3214 */ 3215 static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, 3216 unsigned char type) 3217 { 3218 struct Command { 3219 struct CommandListHeader CommandHeader; 3220 struct RequestBlock Request; 3221 struct ErrDescriptor ErrorDescriptor; 3222 }; 3223 struct Command *cmd; 3224 static const size_t cmd_sz = sizeof(*cmd) + 3225 sizeof(cmd->ErrorDescriptor); 3226 dma_addr_t paddr64; 3227 uint32_t paddr32, tag; 3228 void __iomem *vaddr; 3229 int i, err; 3230 3231 vaddr = pci_ioremap_bar(pdev, 0); 3232 if (vaddr == NULL) 3233 return -ENOMEM; 3234 3235 /* The Inbound Post Queue only accepts 32-bit physical addresses for the 3236 * CCISS commands, so they must be allocated from the lower 4GiB of 3237 * memory. 3238 */ 3239 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3240 if (err) { 3241 iounmap(vaddr); 3242 return -ENOMEM; 3243 } 3244 3245 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); 3246 if (cmd == NULL) { 3247 iounmap(vaddr); 3248 return -ENOMEM; 3249 } 3250 3251 /* This must fit, because of the 32-bit consistent DMA mask. Also, 3252 * although there's no guarantee, we assume that the address is at 3253 * least 4-byte aligned (most likely, it's page-aligned). 3254 */ 3255 paddr32 = paddr64; 3256 3257 cmd->CommandHeader.ReplyQueue = 0; 3258 cmd->CommandHeader.SGList = 0; 3259 cmd->CommandHeader.SGTotal = 0; 3260 cmd->CommandHeader.Tag.lower = paddr32; 3261 cmd->CommandHeader.Tag.upper = 0; 3262 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); 3263 3264 cmd->Request.CDBLen = 16; 3265 cmd->Request.Type.Type = TYPE_MSG; 3266 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE; 3267 cmd->Request.Type.Direction = XFER_NONE; 3268 cmd->Request.Timeout = 0; /* Don't time out */ 3269 cmd->Request.CDB[0] = opcode; 3270 cmd->Request.CDB[1] = type; 3271 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ 3272 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd); 3273 cmd->ErrorDescriptor.Addr.upper = 0; 3274 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo); 3275 3276 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET); 3277 3278 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { 3279 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); 3280 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32) 3281 break; 3282 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); 3283 } 3284 3285 iounmap(vaddr); 3286 3287 /* we leak the DMA buffer here ... no choice since the controller could 3288 * still complete the command. 3289 */ 3290 if (i == HPSA_MSG_SEND_RETRY_LIMIT) { 3291 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n", 3292 opcode, type); 3293 return -ETIMEDOUT; 3294 } 3295 3296 pci_free_consistent(pdev, cmd_sz, cmd, paddr64); 3297 3298 if (tag & HPSA_ERROR_BIT) { 3299 dev_err(&pdev->dev, "controller message %02x:%02x failed\n", 3300 opcode, type); 3301 return -EIO; 3302 } 3303 3304 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", 3305 opcode, type); 3306 return 0; 3307 } 3308 3309 #define hpsa_noop(p) hpsa_message(p, 3, 0) 3310 3311 static int hpsa_controller_hard_reset(struct pci_dev *pdev, 3312 void * __iomem vaddr, u32 use_doorbell) 3313 { 3314 u16 pmcsr; 3315 int pos; 3316 3317 if (use_doorbell) { 3318 /* For everything after the P600, the PCI power state method 3319 * of resetting the controller doesn't work, so we have this 3320 * other way using the doorbell register. 
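 * (use_doorbell arrives from the caller as DOORBELL_CTLR_RESET2 when
 * the firmware's misc_fw_support advertises it, or as zero to request
 * the PCI power state method instead; a nonzero value is written to
 * SA5_DOORBELL verbatim below.)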
3321 */ 3322 dev_info(&pdev->dev, "using doorbell to reset controller\n"); 3323 writel(use_doorbell, vaddr + SA5_DOORBELL); 3324 } else { /* Try to do it the PCI power state way */ 3325 3326 /* Quoting from the Open CISS Specification: "The Power 3327 * Management Control/Status Register (CSR) controls the power 3328 * state of the device. The normal operating state is D0, 3329 * CSR=00h. The software off state is D3, CSR=03h. To reset 3330 * the controller, place the interface device in D3 then to D0, 3331 * this causes a secondary PCI reset which will reset the 3332 * controller." */ 3333 3334 pos = pci_find_capability(pdev, PCI_CAP_ID_PM); 3335 if (pos == 0) { 3336 dev_err(&pdev->dev, 3337 "hpsa_reset_controller: " 3338 "PCI PM not supported\n"); 3339 return -ENODEV; 3340 } 3341 dev_info(&pdev->dev, "using PCI PM to reset controller\n"); 3342 /* enter the D3hot power management state */ 3343 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); 3344 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 3345 pmcsr |= PCI_D3hot; 3346 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 3347 3348 msleep(500); 3349 3350 /* enter the D0 power management state */ 3351 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 3352 pmcsr |= PCI_D0; 3353 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 3354 3355 /* 3356 * The P600 requires a small delay when changing states. 3357 * Otherwise we may think the board did not reset and we bail. 3358 * This for kdump only and is particular to the P600. 3359 */ 3360 msleep(500); 3361 } 3362 return 0; 3363 } 3364 3365 static __devinit void init_driver_version(char *driver_version, int len) 3366 { 3367 memset(driver_version, 0, len); 3368 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1); 3369 } 3370 3371 static __devinit int write_driver_ver_to_cfgtable( 3372 struct CfgTable __iomem *cfgtable) 3373 { 3374 char *driver_version; 3375 int i, size = sizeof(cfgtable->driver_version); 3376 3377 driver_version = kmalloc(size, GFP_KERNEL); 3378 if (!driver_version) 3379 return -ENOMEM; 3380 3381 init_driver_version(driver_version, size); 3382 for (i = 0; i < size; i++) 3383 writeb(driver_version[i], &cfgtable->driver_version[i]); 3384 kfree(driver_version); 3385 return 0; 3386 } 3387 3388 static __devinit void read_driver_ver_from_cfgtable( 3389 struct CfgTable __iomem *cfgtable, unsigned char *driver_ver) 3390 { 3391 int i; 3392 3393 for (i = 0; i < sizeof(cfgtable->driver_version); i++) 3394 driver_ver[i] = readb(&cfgtable->driver_version[i]); 3395 } 3396 3397 static __devinit int controller_reset_failed( 3398 struct CfgTable __iomem *cfgtable) 3399 { 3400 3401 char *driver_ver, *old_driver_ver; 3402 int rc, size = sizeof(cfgtable->driver_version); 3403 3404 old_driver_ver = kmalloc(2 * size, GFP_KERNEL); 3405 if (!old_driver_ver) 3406 return -ENOMEM; 3407 driver_ver = old_driver_ver + size; 3408 3409 /* After a reset, the 32 bytes of "driver version" in the cfgtable 3410 * should have been changed, otherwise we know the reset failed. 3411 */ 3412 init_driver_version(old_driver_ver, size); 3413 read_driver_ver_from_cfgtable(cfgtable, driver_ver); 3414 rc = !memcmp(driver_ver, old_driver_ver, size); 3415 kfree(old_driver_ver); 3416 return rc; 3417 } 3418 /* This does a hard reset of the controller using PCI power management 3419 * states or the using the doorbell register. 
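 *
 * Rough sequence of the function below: save PCI config state, reset
 * via the doorbell or a D3hot -> D0 transition, restore PCI state,
 * poll the scratchpad register until the board goes not-ready and
 * then ready again, and finally compare the driver version bytes in
 * the config table to confirm the reset really happened.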
3420 */ 3421 static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) 3422 { 3423 u64 cfg_offset; 3424 u32 cfg_base_addr; 3425 u64 cfg_base_addr_index; 3426 void __iomem *vaddr; 3427 unsigned long paddr; 3428 u32 misc_fw_support; 3429 int rc; 3430 struct CfgTable __iomem *cfgtable; 3431 u32 use_doorbell; 3432 u32 board_id; 3433 u16 command_register; 3434 3435 /* For controllers as old as the P600, this is very nearly 3436 * the same thing as 3437 * 3438 * pci_save_state(pci_dev); 3439 * pci_set_power_state(pci_dev, PCI_D3hot); 3440 * pci_set_power_state(pci_dev, PCI_D0); 3441 * pci_restore_state(pci_dev); 3442 * 3443 * For controllers newer than the P600, the pci power state 3444 * method of resetting doesn't work so we have another way 3445 * using the doorbell register. 3446 */ 3447 3448 rc = hpsa_lookup_board_id(pdev, &board_id); 3449 if (rc < 0 || !ctlr_is_resettable(board_id)) { 3450 dev_warn(&pdev->dev, "Not resetting device.\n"); 3451 return -ENODEV; 3452 } 3453 3454 /* if controller is soft- but not hard resettable... */ 3455 if (!ctlr_is_hard_resettable(board_id)) 3456 return -ENOTSUPP; /* try soft reset later. */ 3457 3458 /* Save the PCI command register */ 3459 pci_read_config_word(pdev, 4, &command_register); 3460 /* Turn the board off. This is so that later pci_restore_state() 3461 * won't turn the board on before the rest of config space is ready. 3462 */ 3463 pci_disable_device(pdev); 3464 pci_save_state(pdev); 3465 3466 /* find the first memory BAR, so we can find the cfg table */ 3467 rc = hpsa_pci_find_memory_BAR(pdev, &paddr); 3468 if (rc) 3469 return rc; 3470 vaddr = remap_pci_mem(paddr, 0x250); 3471 if (!vaddr) 3472 return -ENOMEM; 3473 3474 /* find cfgtable in order to check if reset via doorbell is supported */ 3475 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, 3476 &cfg_base_addr_index, &cfg_offset); 3477 if (rc) 3478 goto unmap_vaddr; 3479 cfgtable = remap_pci_mem(pci_resource_start(pdev, 3480 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable)); 3481 if (!cfgtable) { 3482 rc = -ENOMEM; 3483 goto unmap_vaddr; 3484 } 3485 rc = write_driver_ver_to_cfgtable(cfgtable); 3486 if (rc) 3487 goto unmap_vaddr; 3488 3489 /* If reset via doorbell register is supported, use that. 3490 * There are two such methods. Favor the newest method. 3491 */ 3492 misc_fw_support = readl(&cfgtable->misc_fw_support); 3493 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2; 3494 if (use_doorbell) { 3495 use_doorbell = DOORBELL_CTLR_RESET2; 3496 } else { 3497 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; 3498 if (use_doorbell) { 3499 dev_warn(&pdev->dev, "Soft reset not supported. " 3500 "Firmware update is required.\n"); 3501 rc = -ENOTSUPP; /* try soft reset */ 3502 goto unmap_cfgtable; 3503 } 3504 } 3505 3506 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); 3507 if (rc) 3508 goto unmap_cfgtable; 3509 3510 pci_restore_state(pdev); 3511 rc = pci_enable_device(pdev); 3512 if (rc) { 3513 dev_warn(&pdev->dev, "failed to enable device.\n"); 3514 goto unmap_cfgtable; 3515 } 3516 pci_write_config_word(pdev, 4, command_register); 3517 3518 /* Some devices (notably the HP Smart Array 5i Controller) 3519 need a little pause here */ 3520 msleep(HPSA_POST_RESET_PAUSE_MSECS); 3521 3522 /* Wait for board to become not ready, then ready. */ 3523 dev_info(&pdev->dev, "Waiting for board to reset.\n"); 3524 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY); 3525 if (rc) { 3526 dev_warn(&pdev->dev, 3527 "failed waiting for board to reset." 
3528 " Will try soft reset.\n"); 3529 rc = -ENOTSUPP; /* Not expected, but try soft reset later */ 3530 goto unmap_cfgtable; 3531 } 3532 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); 3533 if (rc) { 3534 dev_warn(&pdev->dev, 3535 "failed waiting for board to become ready " 3536 "after hard reset\n"); 3537 goto unmap_cfgtable; 3538 } 3539 3540 rc = controller_reset_failed(vaddr); 3541 if (rc < 0) 3542 goto unmap_cfgtable; 3543 if (rc) { 3544 dev_warn(&pdev->dev, "Unable to successfully reset " 3545 "controller. Will try soft reset.\n"); 3546 rc = -ENOTSUPP; 3547 } else { 3548 dev_info(&pdev->dev, "board ready after hard reset.\n"); 3549 } 3550 3551 unmap_cfgtable: 3552 iounmap(cfgtable); 3553 3554 unmap_vaddr: 3555 iounmap(vaddr); 3556 return rc; 3557 } 3558 3559 /* 3560 * We cannot read the structure directly, for portability we must use 3561 * the io functions. 3562 * This is for debug only. 3563 */ 3564 static void print_cfg_table(struct device *dev, struct CfgTable *tb) 3565 { 3566 #ifdef HPSA_DEBUG 3567 int i; 3568 char temp_name[17]; 3569 3570 dev_info(dev, "Controller Configuration information\n"); 3571 dev_info(dev, "------------------------------------\n"); 3572 for (i = 0; i < 4; i++) 3573 temp_name[i] = readb(&(tb->Signature[i])); 3574 temp_name[4] = '\0'; 3575 dev_info(dev, " Signature = %s\n", temp_name); 3576 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence))); 3577 dev_info(dev, " Transport methods supported = 0x%x\n", 3578 readl(&(tb->TransportSupport))); 3579 dev_info(dev, " Transport methods active = 0x%x\n", 3580 readl(&(tb->TransportActive))); 3581 dev_info(dev, " Requested transport Method = 0x%x\n", 3582 readl(&(tb->HostWrite.TransportRequest))); 3583 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n", 3584 readl(&(tb->HostWrite.CoalIntDelay))); 3585 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n", 3586 readl(&(tb->HostWrite.CoalIntCount))); 3587 dev_info(dev, " Max outstanding commands = 0x%d\n", 3588 readl(&(tb->CmdsOutMax))); 3589 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); 3590 for (i = 0; i < 16; i++) 3591 temp_name[i] = readb(&(tb->ServerName[i])); 3592 temp_name[16] = '\0'; 3593 dev_info(dev, " Server Name = %s\n", temp_name); 3594 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n", 3595 readl(&(tb->HeartBeat))); 3596 #endif /* HPSA_DEBUG */ 3597 } 3598 3599 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) 3600 { 3601 int i, offset, mem_type, bar_type; 3602 3603 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */ 3604 return 0; 3605 offset = 0; 3606 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 3607 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE; 3608 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO) 3609 offset += 4; 3610 else { 3611 mem_type = pci_resource_flags(pdev, i) & 3612 PCI_BASE_ADDRESS_MEM_TYPE_MASK; 3613 switch (mem_type) { 3614 case PCI_BASE_ADDRESS_MEM_TYPE_32: 3615 case PCI_BASE_ADDRESS_MEM_TYPE_1M: 3616 offset += 4; /* 32 bit */ 3617 break; 3618 case PCI_BASE_ADDRESS_MEM_TYPE_64: 3619 offset += 8; 3620 break; 3621 default: /* reserved in PCI 2.2 */ 3622 dev_warn(&pdev->dev, 3623 "base address is invalid\n"); 3624 return -1; 3625 break; 3626 } 3627 } 3628 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) 3629 return i + 1; 3630 } 3631 return -1; 3632 } 3633 3634 /* If MSI/MSI-X is supported by the kernel we will try to enable it on 3635 * controllers that are capable. If not, we use IO-APIC mode. 
3636 */ 3637 3638 static void __devinit hpsa_interrupt_mode(struct ctlr_info *h) 3639 { 3640 #ifdef CONFIG_PCI_MSI 3641 int err; 3642 struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1}, 3643 {0, 2}, {0, 3} 3644 }; 3645 3646 /* Some boards advertise MSI but don't really support it */ 3647 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || 3648 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) 3649 goto default_int_mode; 3650 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { 3651 dev_info(&h->pdev->dev, "MSIX\n"); 3652 err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4); 3653 if (!err) { 3654 h->intr[0] = hpsa_msix_entries[0].vector; 3655 h->intr[1] = hpsa_msix_entries[1].vector; 3656 h->intr[2] = hpsa_msix_entries[2].vector; 3657 h->intr[3] = hpsa_msix_entries[3].vector; 3658 h->msix_vector = 1; 3659 return; 3660 } 3661 if (err > 0) { 3662 dev_warn(&h->pdev->dev, "only %d MSI-X vectors " 3663 "available\n", err); 3664 goto default_int_mode; 3665 } else { 3666 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", 3667 err); 3668 goto default_int_mode; 3669 } 3670 } 3671 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { 3672 dev_info(&h->pdev->dev, "MSI\n"); 3673 if (!pci_enable_msi(h->pdev)) 3674 h->msi_vector = 1; 3675 else 3676 dev_warn(&h->pdev->dev, "MSI init failed\n"); 3677 } 3678 default_int_mode: 3679 #endif /* CONFIG_PCI_MSI */ 3680 /* if we get here we're going to use the default interrupt mode */ 3681 h->intr[h->intr_mode] = h->pdev->irq; 3682 } 3683 3684 static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) 3685 { 3686 int i; 3687 u32 subsystem_vendor_id, subsystem_device_id; 3688 3689 subsystem_vendor_id = pdev->subsystem_vendor; 3690 subsystem_device_id = pdev->subsystem_device; 3691 *board_id = ((subsystem_device_id << 16) & 0xffff0000) | 3692 subsystem_vendor_id; 3693 3694 for (i = 0; i < ARRAY_SIZE(products); i++) 3695 if (*board_id == products[i].board_id) 3696 return i; 3697 3698 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP && 3699 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) || 3700 !hpsa_allow_any) { 3701 dev_warn(&pdev->dev, "unrecognized board ID: " 3702 "0x%08x, ignoring.\n", *board_id); 3703 return -ENODEV; 3704 } 3705 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */ 3706 } 3707 3708 static inline bool hpsa_board_disabled(struct pci_dev *pdev) 3709 { 3710 u16 command; 3711 3712 (void) pci_read_config_word(pdev, PCI_COMMAND, &command); 3713 return ((command & PCI_COMMAND_MEMORY) == 0); 3714 } 3715 3716 static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev, 3717 unsigned long *memory_bar) 3718 { 3719 int i; 3720 3721 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) 3722 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 3723 /* addressing mode bits already removed */ 3724 *memory_bar = pci_resource_start(pdev, i); 3725 dev_dbg(&pdev->dev, "memory BAR = %lx\n", 3726 *memory_bar); 3727 return 0; 3728 } 3729 dev_warn(&pdev->dev, "no memory BAR found\n"); 3730 return -ENODEV; 3731 } 3732 3733 static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev, 3734 void __iomem *vaddr, int wait_for_ready) 3735 { 3736 int i, iterations; 3737 u32 scratchpad; 3738 if (wait_for_ready) 3739 iterations = HPSA_BOARD_READY_ITERATIONS; 3740 else 3741 iterations = HPSA_BOARD_NOT_READY_ITERATIONS; 3742 3743 for (i = 0; i < iterations; i++) { 3744 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); 3745 if (wait_for_ready) { 3746 if (scratchpad == HPSA_FIRMWARE_READY) 3747 return 0; 3748 } else { 3749 

static inline bool hpsa_board_disabled(struct pci_dev *pdev)
{
	u16 command;

	(void) pci_read_config_word(pdev, PCI_COMMAND, &command);
	return ((command & PCI_COMMAND_MEMORY) == 0);
}

static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar)
{
	int i;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			/* addressing mode bits already removed */
			*memory_bar = pci_resource_start(pdev, i);
			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
				*memory_bar);
			return 0;
		}
	dev_warn(&pdev->dev, "no memory BAR found\n");
	return -ENODEV;
}

static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
	void __iomem *vaddr, int wait_for_ready)
{
	int i, iterations;
	u32 scratchpad;

	if (wait_for_ready)
		iterations = HPSA_BOARD_READY_ITERATIONS;
	else
		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;

	for (i = 0; i < iterations; i++) {
		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
		if (wait_for_ready) {
			if (scratchpad == HPSA_FIRMWARE_READY)
				return 0;
		} else {
			if (scratchpad != HPSA_FIRMWARE_READY)
				return 0;
		}
		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	dev_warn(&pdev->dev, "board not ready, timed out.\n");
	return -ENODEV;
}

static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset)
{
	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
	*cfg_base_addr &= (u32) 0x0000ffff;
	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
	if (*cfg_base_addr_index == -1) {
		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
		return -ENODEV;
	}
	return 0;
}

static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	u32 trans_offset;
	int rc;

	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
		&cfg_base_addr_index, &cfg_offset);
	if (rc)
		return rc;
	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
		cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
	if (!h->cfgtable)
		return -ENOMEM;
	rc = write_driver_ver_to_cfgtable(h->cfgtable);
	if (rc)
		return rc;
	/* Find performant mode table. */
	trans_offset = readl(&h->cfgtable->TransMethodOffset);
	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
		cfg_base_addr_index) + cfg_offset + trans_offset,
		sizeof(*h->transtable));
	if (!h->transtable)
		return -ENOMEM;
	return 0;
}

static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));

	/* Limit commands in memory limited kdump scenario. */
	if (reset_devices && h->max_commands > 32)
		h->max_commands = 32;

	if (h->max_commands < 16) {
		dev_warn(&h->pdev->dev, "Controller reports "
			"max supported commands of %d, an obvious lie. "
			"Using 16. Ensure that firmware is up to date.\n",
			h->max_commands);
		h->max_commands = 16;
	}
}

/* Interrogate the hardware for some limits:
 * max commands, max SG elements without chaining, and with chaining,
 * SG chain block size, etc.
 */
static void __devinit hpsa_find_board_params(struct ctlr_info *h)
{
	hpsa_get_max_perf_mode_cmds(h);
	h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
	/*
	 * Limit in-command s/g elements to 32 to save DMA'able memory.
	 * However, the spec says if 0, use 31.
	 */
	h->max_cmd_sg_entries = 31;
	if (h->maxsgentries > 512) {
		h->max_cmd_sg_entries = 32;
		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
		h->maxsgentries--; /* save one for chain pointer */
	} else {
		h->maxsgentries = 31; /* default to traditional values */
		h->chainsize = 0;
	}
}
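
/*
 * Worked example of the chaining math above (illustrative numbers): if
 * the controller reports 1000 scatter-gather elements, then
 * max_cmd_sg_entries = 32, chainsize = 1000 - 32 + 1 = 969, and
 * maxsgentries drops to 999 after reserving one slot for the chain
 * pointer.
 */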

static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
	if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
	    (readb(&h->cfgtable->Signature[1]) != 'I') ||
	    (readb(&h->cfgtable->Signature[2]) != 'S') ||
	    (readb(&h->cfgtable->Signature[3]) != 'S')) {
		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
		return false;
	}
	return true;
}

/* Need to enable prefetch in the SCSI core for 6400 in x86 */
static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
{
#ifdef CONFIG_X86
	u32 prefetch;

	prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
	prefetch |= 0x100;
	writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
#endif
}

/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
	u32 dma_prefetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}
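
/*
 * Transport-mode change handshake, in brief: the driver writes the
 * requested method to HostWrite.TransportRequest, rings CFGTBL_ChangeReq
 * in the doorbell register, then polls below until the controller
 * clears the doorbell bit to acknowledge the change.
 */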
3890 */ 3891 for (i = 0; i < MAX_CONFIG_WAIT; i++) { 3892 spin_lock_irqsave(&h->lock, flags); 3893 doorbell_value = readl(h->vaddr + SA5_DOORBELL); 3894 spin_unlock_irqrestore(&h->lock, flags); 3895 if (!(doorbell_value & CFGTBL_ChangeReq)) 3896 break; 3897 /* delay and try again */ 3898 usleep_range(10000, 20000); 3899 } 3900 } 3901 3902 static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h) 3903 { 3904 u32 trans_support; 3905 3906 trans_support = readl(&(h->cfgtable->TransportSupport)); 3907 if (!(trans_support & SIMPLE_MODE)) 3908 return -ENOTSUPP; 3909 3910 h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); 3911 /* Update the field, and then ring the doorbell */ 3912 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); 3913 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 3914 hpsa_wait_for_mode_change_ack(h); 3915 print_cfg_table(&h->pdev->dev, h->cfgtable); 3916 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) { 3917 dev_warn(&h->pdev->dev, 3918 "unable to get board into simple mode\n"); 3919 return -ENODEV; 3920 } 3921 h->transMethod = CFGTBL_Trans_Simple; 3922 return 0; 3923 } 3924 3925 static int __devinit hpsa_pci_init(struct ctlr_info *h) 3926 { 3927 int prod_index, err; 3928 3929 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); 3930 if (prod_index < 0) 3931 return -ENODEV; 3932 h->product_name = products[prod_index].product_name; 3933 h->access = *(products[prod_index].access); 3934 3935 if (hpsa_board_disabled(h->pdev)) { 3936 dev_warn(&h->pdev->dev, "controller appears to be disabled\n"); 3937 return -ENODEV; 3938 } 3939 3940 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | 3941 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); 3942 3943 err = pci_enable_device(h->pdev); 3944 if (err) { 3945 dev_warn(&h->pdev->dev, "unable to enable PCI device\n"); 3946 return err; 3947 } 3948 3949 err = pci_request_regions(h->pdev, HPSA); 3950 if (err) { 3951 dev_err(&h->pdev->dev, 3952 "cannot obtain PCI resources, aborting\n"); 3953 return err; 3954 } 3955 hpsa_interrupt_mode(h); 3956 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); 3957 if (err) 3958 goto err_out_free_res; 3959 h->vaddr = remap_pci_mem(h->paddr, 0x250); 3960 if (!h->vaddr) { 3961 err = -ENOMEM; 3962 goto err_out_free_res; 3963 } 3964 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); 3965 if (err) 3966 goto err_out_free_res; 3967 err = hpsa_find_cfgtables(h); 3968 if (err) 3969 goto err_out_free_res; 3970 hpsa_find_board_params(h); 3971 3972 if (!hpsa_CISS_signature_present(h)) { 3973 err = -ENODEV; 3974 goto err_out_free_res; 3975 } 3976 hpsa_enable_scsi_prefetch(h); 3977 hpsa_p600_dma_prefetch_quirk(h); 3978 err = hpsa_enter_simple_mode(h); 3979 if (err) 3980 goto err_out_free_res; 3981 return 0; 3982 3983 err_out_free_res: 3984 if (h->transtable) 3985 iounmap(h->transtable); 3986 if (h->cfgtable) 3987 iounmap(h->cfgtable); 3988 if (h->vaddr) 3989 iounmap(h->vaddr); 3990 /* 3991 * Deliberately omit pci_disable_device(): it does something nasty to 3992 * Smart Array controllers that pci_enable_device does not undo 3993 */ 3994 pci_release_regions(h->pdev); 3995 return err; 3996 } 3997 3998 static void __devinit hpsa_hba_inquiry(struct ctlr_info *h) 3999 { 4000 int rc; 4001 4002 #define HBA_INQUIRY_BYTE_COUNT 64 4003 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL); 4004 if (!h->hba_inquiry_data) 4005 return; 4006 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0, 4007 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT); 4008 if (rc != 

static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
{
	int rc;

#define HBA_INQUIRY_BYTE_COUNT 64
	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
	if (!h->hba_inquiry_data)
		return;
	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
	if (rc != 0) {
		kfree(h->hba_inquiry_data);
		h->hba_inquiry_data = NULL;
	}
}

static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
{
	int rc, i;

	if (!reset_devices)
		return 0;

	/* Reset the controller with a PCI power-cycle or via doorbell */
	rc = hpsa_kdump_hard_reset_controller(pdev);

	/* -ENOTSUPP here means we cannot reset the controller
	 * but it's already (and still) up and running in
	 * "performant mode". Or, it might be 640x, which can't reset
	 * due to concerns about shared bbwc between 6402/6404 pair.
	 */
	if (rc == -ENOTSUPP)
		return rc; /* just try to do the kdump anyhow. */
	if (rc)
		return -ENODEV;

	/* Now try to get the controller to respond to a no-op */
	dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			break;
		else
			dev_warn(&pdev->dev, "no-op failed%s\n",
				(i < HPSA_POST_RESET_NOOP_RETRIES - 1 ?
					"; re-trying" : ""));
	}
	return 0;
}

static __devinit int hpsa_allocate_cmd_pool(struct ctlr_info *h)
{
	h->cmd_pool_bits = kzalloc(
		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
		sizeof(unsigned long), GFP_KERNEL);
	h->cmd_pool = pci_alloc_consistent(h->pdev,
		h->nr_cmds * sizeof(*h->cmd_pool),
		&(h->cmd_pool_dhandle));
	h->errinfo_pool = pci_alloc_consistent(h->pdev,
		h->nr_cmds * sizeof(*h->errinfo_pool),
		&(h->errinfo_pool_dhandle));
	if ((h->cmd_pool_bits == NULL)
	    || (h->cmd_pool == NULL)
	    || (h->errinfo_pool == NULL)) {
		dev_err(&h->pdev->dev, "out of memory in %s", __func__);
		return -ENOMEM;
	}
	return 0;
}

static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
	kfree(h->cmd_pool_bits);
	if (h->cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(struct CommandList),
			h->cmd_pool, h->cmd_pool_dhandle);
	if (h->errinfo_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(struct ErrorInfo),
			h->errinfo_pool,
			h->errinfo_pool_dhandle);
}

static int hpsa_request_irq(struct ctlr_info *h,
	irqreturn_t (*msixhandler)(int, void *),
	irqreturn_t (*intxhandler)(int, void *))
{
	int rc;

	if (h->msix_vector || h->msi_vector)
		rc = request_irq(h->intr[h->intr_mode], msixhandler,
				0, h->devname, h);
	else
		rc = request_irq(h->intr[h->intr_mode], intxhandler,
				IRQF_SHARED, h->devname, h);
	if (rc) {
		dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
			h->intr[h->intr_mode], h->devname);
		return -ENODEV;
	}
	return 0;
}

static int __devinit hpsa_kdump_soft_reset(struct ctlr_info *h)
{
	if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
		HPSA_RESET_TYPE_CONTROLLER)) {
		dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
		return -EIO;
	}

	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
		return -1;
	}

	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
		dev_warn(&h->pdev->dev, "Board failed to become ready "
			"after soft reset.\n");
		return -1;
	}

	return 0;
}
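
/*
 * Soft-reset sequence, as implemented above: send a controller reset
 * command, wait for the scratchpad register to leave the "firmware
 * ready" state (evidence the reset actually took effect), then wait
 * for the board to report ready again.
 */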

static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
	free_irq(h->intr[h->intr_mode], h);
#ifdef CONFIG_PCI_MSI
	if (h->msix_vector)
		pci_disable_msix(h->pdev);
	else if (h->msi_vector)
		pci_disable_msi(h->pdev);
#endif /* CONFIG_PCI_MSI */
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	kfree(h->blockFetchTable);
	pci_free_consistent(h->pdev, h->reply_pool_size,
		h->reply_pool, h->reply_pool_dhandle);
	if (h->vaddr)
		iounmap(h->vaddr);
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	pci_release_regions(h->pdev);
	kfree(h);
}

static void remove_ctlr_from_lockup_detector_list(struct ctlr_info *h)
{
	assert_spin_locked(&lockup_detector_lock);
	if (!hpsa_lockup_detector)
		return;
	if (h->lockup_detected)
		return; /* already stopped the lockup detector */
	list_del(&h->lockup_list);
}

/* Called when controller lockup detected. */
static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
{
	struct CommandList *c = NULL;

	assert_spin_locked(&h->lock);
	/* Mark all outstanding commands as failed and complete them. */
	while (!list_empty(list)) {
		c = list_entry(list->next, struct CommandList, list);
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
		finish_cmd(c, c->Header.Tag.lower);
	}
}

static void controller_lockup_detected(struct ctlr_info *h)
{
	unsigned long flags;

	assert_spin_locked(&lockup_detector_lock);
	remove_ctlr_from_lockup_detector_list(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	spin_lock_irqsave(&h->lock, flags);
	h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
	spin_unlock_irqrestore(&h->lock, flags);
	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
			h->lockup_detected);
	pci_disable_device(h->pdev);
	spin_lock_irqsave(&h->lock, flags);
	fail_all_cmds_on_list(h, &h->cmpQ);
	fail_all_cmds_on_list(h, &h->reqQ);
	spin_unlock_irqrestore(&h->lock, flags);
}

#define HEARTBEAT_SAMPLE_INTERVAL (10 * HZ)
#define HEARTBEAT_CHECK_MINIMUM_INTERVAL (HEARTBEAT_SAMPLE_INTERVAL / 2)

static void detect_controller_lockup(struct ctlr_info *h)
{
	u64 now;
	u32 heartbeat;
	unsigned long flags;

	assert_spin_locked(&lockup_detector_lock);
	now = get_jiffies_64();
	/* If we've received an interrupt recently, we're ok. */
	if (time_after64(h->last_intr_timestamp +
				(HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
		return;

	/*
	 * If we've already checked the heartbeat recently, we're ok.
	 * This could happen if someone sends us a signal. We
	 * otherwise don't care about signals in this thread.
	 */
	if (time_after64(h->last_heartbeat_timestamp +
				(HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
		return;

	/* If heartbeat has not changed since we last looked, we're not ok. */
	spin_lock_irqsave(&h->lock, flags);
	heartbeat = readl(&h->cfgtable->HeartBeat);
	spin_unlock_irqrestore(&h->lock, flags);
	if (h->last_heartbeat == heartbeat) {
		controller_lockup_detected(h);
		return;
	}

	/* We're ok. */
	h->last_heartbeat = heartbeat;
	h->last_heartbeat_timestamp = now;
}
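
/*
 * Lockup detection, in brief: a single kthread samples each registered
 * controller's HeartBeat register every HEARTBEAT_SAMPLE_INTERVAL (10
 * seconds). Firmware increments HeartBeat while healthy, so a value
 * unchanged across two samples, with no recent interrupt activity, is
 * treated as a locked-up controller and all outstanding commands are
 * failed.
 */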

static int detect_controller_lockup_thread(void *notused)
{
	struct ctlr_info *h;
	unsigned long flags;

	while (1) {
		struct list_head *this, *tmp;

		schedule_timeout_interruptible(HEARTBEAT_SAMPLE_INTERVAL);
		if (kthread_should_stop())
			break;
		spin_lock_irqsave(&lockup_detector_lock, flags);
		list_for_each_safe(this, tmp, &hpsa_ctlr_list) {
			h = list_entry(this, struct ctlr_info, lockup_list);
			detect_controller_lockup(h);
		}
		spin_unlock_irqrestore(&lockup_detector_lock, flags);
	}
	return 0;
}

static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&lockup_detector_lock, flags);
	list_add_tail(&h->lockup_list, &hpsa_ctlr_list);
	spin_unlock_irqrestore(&lockup_detector_lock, flags);
}

static void start_controller_lockup_detector(struct ctlr_info *h)
{
	/* Start the lockup detector thread if not already started */
	if (!hpsa_lockup_detector) {
		spin_lock_init(&lockup_detector_lock);
		hpsa_lockup_detector =
			kthread_run(detect_controller_lockup_thread,
						NULL, HPSA);
	}
	if (!hpsa_lockup_detector) {
		dev_warn(&h->pdev->dev,
			"Could not start lockup detector thread\n");
		return;
	}
	add_ctlr_to_lockup_detector_list(h);
}

static void stop_controller_lockup_detector(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&lockup_detector_lock, flags);
	remove_ctlr_from_lockup_detector_list(h);
	/* If the list of ctlrs to monitor is empty, stop the thread */
	if (list_empty(&hpsa_ctlr_list)) {
		spin_unlock_irqrestore(&lockup_detector_lock, flags);
		kthread_stop(hpsa_lockup_detector);
		spin_lock_irqsave(&lockup_detector_lock, flags);
		hpsa_lockup_detector = NULL;
	}
	spin_unlock_irqrestore(&lockup_detector_lock, flags);
}

static int __devinit hpsa_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	int dac, rc;
	struct ctlr_info *h;
	int try_soft_reset = 0;
	unsigned long flags;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_init_reset_devices(pdev);
	if (rc) {
		if (rc != -ENOTSUPP)
			return rc;
		/* If the reset fails in a particular way (it has no way to do
		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
		 * a soft reset once we get the controller configured up to the
		 * point that it can accept a command.
		 */
		try_soft_reset = 1;
		rc = 0;
	}

reinit_after_soft_reset:

	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware and by
	 * the driver. See comments in hpsa.h for more info.
	 */
#define COMMANDLIST_ALIGNMENT 32
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->pdev = pdev;
	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->cmpQ);
	INIT_LIST_HEAD(&h->reqQ);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->scan_lock);
	rc = hpsa_pci_init(h);
	if (rc != 0)
		goto clean1;

	sprintf(h->devname, HPSA "%d", number_of_controllers);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc == 0) {
		dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc == 0) {
			dac = 0;
		} else {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean1;
		}
	}

	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
		goto clean2;
	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
		h->devname, pdev->device,
		h->intr[h->intr_mode], dac ? "" : " not");
	if (hpsa_allocate_cmd_pool(h))
		goto clean4;
	if (hpsa_allocate_sg_chain_blocks(h))
		goto clean4;
	init_waitqueue_head(&h->scan_wait_queue);
	h->scan_finished = 1; /* no scan currently in progress */

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;
	h->scsi_host = NULL;
	spin_lock_init(&h->devlock);
	hpsa_put_ctlr_into_performant_mode(h);

	/* At this point, the controller is ready to take commands.
	 * Now, if reset_devices and the hard reset didn't work, try
	 * the soft reset and see if that works.
	 */
	if (try_soft_reset) {

		/* This is kind of gross. We may or may not get a completion
		 * from the soft reset command, and if we do, then the value
		 * from the fifo may or may not be valid. So, we wait 10 secs
		 * after the reset throwing away any completions we get during
		 * that time. Unregister the interrupt handler and register
		 * fake ones to scoop up any residual completions.
		 */
		spin_lock_irqsave(&h->lock, flags);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);
		spin_unlock_irqrestore(&h->lock, flags);
		free_irq(h->intr[h->intr_mode], h);
		rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
					hpsa_intx_discard_completions);
		if (rc) {
			dev_warn(&h->pdev->dev, "Failed to request_irq after "
				"soft reset.\n");
			goto clean4;
		}

		rc = hpsa_kdump_soft_reset(h);
		if (rc)
			/* Neither hard nor soft reset worked, we're hosed. */
			goto clean4;

		dev_info(&h->pdev->dev, "Board READY.\n");
		dev_info(&h->pdev->dev,
			"Waiting for stale completions to drain.\n");
		h->access.set_intr_mask(h, HPSA_INTR_ON);
		msleep(10000);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);

		rc = controller_reset_failed(h->cfgtable);
		if (rc)
			dev_info(&h->pdev->dev,
				"Soft reset appears to have failed.\n");

		/* since the controller's reset, we have to go back and re-init
		 * everything. Easiest to just forget what we've done and do it
		 * all over again.
		 */
		hpsa_undo_allocations_after_kdump_soft_reset(h);
		try_soft_reset = 0;
		if (rc)
			/* don't go to clean4, we already unallocated */
			return -ENODEV;

		goto reinit_after_soft_reset;
	}

	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_hba_inquiry(h);
	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
	start_controller_lockup_detector(h);
	return 0;

clean4:
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	free_irq(h->intr[h->intr_mode], h);
clean2:
clean1:
	kfree(h);
	return rc;
}

static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;

	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

	c = cmd_special_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		goto out_of_memory;
	}
	fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD);
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
	if (c->err_info->CommandStatus != 0)
		dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
	cmd_special_free(h, c);
out_of_memory:
	kfree(flush_buf);
}

static void hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Send the flush cache command before turning board interrupts
	 * off, so that all data in the battery-backed cache is written
	 * out to the disks.
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	free_irq(h->intr[h->intr_mode], h);
#ifdef CONFIG_PCI_MSI
	if (h->msix_vector)
		pci_disable_msix(h->pdev);
	else if (h->msi_vector)
		pci_disable_msi(h->pdev);
#endif				/* CONFIG_PCI_MSI */
}

static void __devexit hpsa_free_device_info(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->ndevices; i++)
		kfree(h->dev[i]);
}

static void __devexit hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);
	stop_controller_lockup_detector(h);
	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
	hpsa_shutdown(pdev);
	iounmap(h->vaddr);
	iounmap(h->transtable);
	iounmap(h->cfgtable);
	hpsa_free_device_info(h);
	hpsa_free_sg_chain_blocks(h);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct CommandList),
		h->cmd_pool, h->cmd_pool_dhandle);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct ErrorInfo),
		h->errinfo_pool, h->errinfo_pool_dhandle);
	pci_free_consistent(h->pdev, h->reply_pool_size,
		h->reply_pool, h->reply_pool_dhandle);
	kfree(h->cmd_pool_bits);
	kfree(h->blockFetchTable);
	kfree(h->hba_inquiry_data);
	/*
	 * Deliberately omit pci_disable_device(): it does something nasty to
	 * Smart Array controllers that pci_enable_device does not undo
	 */
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	kfree(h);
}

static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return -ENOSYS;
}

static struct pci_driver hpsa_pci_driver = {
	.name = HPSA,
	.probe = hpsa_init_one,
	.remove = __devexit_p(hpsa_remove_one),
	.id_table = hpsa_pci_device_id,	/* id_table */
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};

/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers. The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands. This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes. The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int *bucket_map)
{
	int i, j, b, size;

	/* even a command with 0 SGs requires 4 blocks */
#define MINIMUM_TRANSFER_BLOCKS 4
#define NUM_BUCKETS 8
	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + MINIMUM_TRANSFER_BLOCKS;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < 8; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}
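
/*
 * Worked example for calc_bucket_map() (illustrative): with
 * bucket[] = {5, 6, 8, 10, 12, 20, 28, ...}, a command with 4 SG
 * entries needs 4 + MINIMUM_TRANSFER_BLOCKS = 8 sixteen-byte blocks;
 * the first bucket >= 8 is bucket[2], so bucket_map[4] = 2.
 */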

static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
	u32 use_short_tags)
{
	int i;
	unsigned long register_value;

	/* This is a bit complicated. There are 8 registers on
	 * the controller which we write to, to tell it 8 different
	 * sizes of commands which there may be. It's a way of
	 * reducing the DMA done to fetch each command. Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within. The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes. The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16 byte blocks.
	 * The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
	 * blocks. Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements. bft[] contains the eight values we write to
	 * the registers. They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/*  5 = 1 s/g entry or 4k
	 *  6 = 2 s/g entry or 8k
	 *  8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */

	h->reply_pool_wraparound = 1; /* spec: init to 1 */

	/* Controller spec: zero out this buffer. */
	memset(h->reply_pool, 0, h->reply_pool_size);
	h->reply_pool_head = h->reply_pool;

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
			SG_ENTRIES_IN_CMD, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(1, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);
	writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
	writel(0, &h->transtable->RepQAddr0High32);
	writel(CFGTBL_Trans_Performant | use_short_tags,
		&(h->cfgtable->HostWrite.TransportRequest));
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_warn(&h->pdev->dev, "unable to get board into"
					" performant mode\n");
		return;
	}
	/* Change the access methods to the performant access methods */
	h->access = SA5_performant_access;
	h->transMethod = CFGTBL_Trans_Performant;
}

static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;

	if (hpsa_simple_mode)
		return;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_pool_size = h->max_commands * sizeof(u64);
	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
				&(h->reply_pool_dhandle));

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->reply_pool == NULL)
		|| (h->blockFetchTable == NULL))
		goto clean_up;

	hpsa_enter_performant_mode(h,
		trans_support & CFGTBL_Trans_use_short_tags);

	return;

clean_up:
	if (h->reply_pool)
		pci_free_consistent(h->pdev, h->reply_pool_size,
			h->reply_pool, h->reply_pool_dhandle);
	kfree(h->blockFetchTable);
}

/*
 * This is it. Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one
 * of our cards.
 */
static int __init hpsa_init(void)
{
	return pci_register_driver(&hpsa_pci_driver);
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);