/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "2.0.2-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
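
/*
 * Illustration (not part of the original source): both knobs above are
 * plain integer module parameters, so they can be given at load time,
 * e.g. "modprobe hpsa hpsa_allow_any=1 hpsa_simple_mode=1".  Because they
 * are declared S_IRUGO|S_IWUSR they also show up under
 * /sys/module/hpsa/parameters/; the driver only consults them while
 * probing a controller, so a runtime write affects later probes rather
 * than already-initialized boards.
 */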

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324a103C, "Smart Array P712m", &SA5_access},
	{0x324b103C, "Smart Array P711m", &SA5_access},
	{0x3350103C, "Smart Array", &SA5_access},
	{0x3351103C, "Smart Array", &SA5_access},
	{0x3352103C, "Smart Array", &SA5_access},
	{0x3353103C, "Smart Array", &SA5_access},
	{0x3354103C, "Smart Array", &SA5_access},
	{0x3355103C, "Smart Array", &SA5_access},
	{0x3356103C, "Smart Array", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
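
/*
 * Worked example (for illustration): board_id 0x3241103C above carries
 * the 16-bit PCI subsystem device id (0x3241, the P212) in the high half
 * and the 16-bit subsystem vendor id (0x103C, HP) in the low half,
 * matching the {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241}
 * entry in hpsa_pci_device_id[].
 */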

static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
	int cmd_type);

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int *bucket_map);
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h);
static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
	void __iomem *vaddr, int wait_for_ready);
#define BOARD_NOT_READY 0
#define BOARD_READY 1

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}
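
/*
 * Note on the sense-buffer layout assumed below (fixed-format sense data,
 * added here for orientation): SenseInfo[2] holds the sense key,
 * SenseInfo[12] the additional sense code (ASC) and SenseInfo[13] the
 * additional sense code qualifier (ASCQ).  STATE_CHANGED, LUN_FAILED,
 * etc. are ASC values reported together with a UNIT_ATTENTION sense key.
 */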
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, "hpsa%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
			"detected, action required\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
			"changed, action required\n", h->ctlr);
		/*
		 * Note: this REPORT_LUNS_CHANGED condition only occurs on
		 * the MSA2012.
		 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, "hpsa%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, "hpsa%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}
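
/*
 * Addressing-mode example (for illustration, assuming the usual CISS
 * 8-byte LUN address layout): the top two bits of byte 3 select the
 * address mode, and 0x40 (binary 01) is the logical-volume mode that
 * is_logical_dev_addr_mode() tests above.  Physical devices and the
 * controller itself use other mode bits and so fail this test.
 */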

static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"UNKNOWN"
};
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	NULL,
};
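
/*
 * For illustration (paths assume the usual scsi_host class layout, not
 * spelled out in this file): once the host is registered, the attributes
 * above appear in sysfs, e.g.
 *
 *   cat /sys/class/scsi_host/host0/firmware_revision
 *   echo 1 > /sys/class/scsi_host/host0/rescan
 *
 * and the per-device attributes (raid_level, lunid, unique_id) under the
 * corresponding scsi_device directories.
 */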
static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= "hpsa",
	.proc_name		= "hpsa",
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs		= hpsa_sdev_attrs,
	.shost_attrs		= hpsa_shost_attrs,
};


/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
	list_add_tail(&c->list, list);
}

static inline u32 next_command(struct ctlr_info *h)
{
	u32 a;

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h);

	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
		a = *(h->reply_pool_head); /* Next cmd in ring buffer */
		(h->reply_pool_head)++;
		h->commands_outstanding--;
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
		h->reply_pool_head = h->reply_pool;
		h->reply_pool_wraparound ^= 1;
	}
	return a;
}
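
/*
 * Illustration of the cycle-bit scheme in next_command() above: bit 0 of
 * each value posted to the reply ring must match h->reply_pool_wraparound
 * for the entry to count as fresh.  Each time the head pointer wraps past
 * max_commands entries, the host flips its expected value, so leftovers
 * from the previous trip around the ring read as FIFO_EMPTY until the
 * controller overwrites them.
 */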

/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant))
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	set_performant_mode(h, c);
	spin_lock_irqsave(&h->lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	start_io(h);
	spin_unlock_irqrestore(&h->lock, flags);
}

static inline void removeQ(struct CommandList *c)
{
	if (WARN_ON(list_empty(&c->list)))
		return;
	list_del_init(&c->list);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);

	memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			set_bit(h->dev[i]->target, lun_taken);
	}

	for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
		if (!test_bit(i, lun_taken)) {
			/* *bus = 1; */
			*target = i;
			*lun = 0;
			found = 1;
			break;
		}
	}
	return !found;
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
		scsi_device_type(device->devtype), hostno,
		device->bus, device->target, device->lun);
	return 0;
}
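
/*
 * Multi-lun example (for illustration), following the logic in
 * hpsa_scsi_add_entry() above: two physical devices whose 8-byte LUN
 * addresses are identical except for scsi3addr[4] (0 vs. 2, say) are two
 * luns of one multi-lun device.  The scsi3addr[4] == 0 entry gets a fresh
 * target from hpsa_find_target_lun(), and the scsi3addr[4] == 2 sibling
 * inherits that bus and target, with lun 2 taken straight from byte 4.
 */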

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}

#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.  If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i]))
				return DEVICE_SAME;
			else
				return DEVICE_CHANGED;
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}
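
/*
 * Summary (added for orientation): adjust_hpsa_scsi_table() below
 * reconciles h->dev[] with a freshly gathered device list in two passes.
 * First it walks h->dev[] and removes (or, for DEVICE_CHANGED, replaces)
 * anything absent from sd[]; then it walks sd[] and adds anything
 * h->dev[] lacks.  Entries consumed from sd[] are set to NULL so the
 * caller's cleanup loop won't free memory now owned by h->dev[].
 */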
static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
		GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
		GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */

	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Don't notify the scsi mid layer of any changes the first time
	 * through (or if there are no changes); scsi_scan_host will do it
	 * later the first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * future cmds to this device will get a selection
			 * timeout as if the device were gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				" for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (sd != NULL)
		sdev->hostdata = sd;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}

static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
		GFP_KERNEL);
	if (!h->cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
			h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}
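
/*
 * SG chaining (summary of the two helpers below, added for orientation):
 * commands carry room for h->max_cmd_sg_entries embedded descriptors.
 * For larger transfers, the last embedded slot is rewritten as an
 * HPSA_SG_CHAIN pointer to this command's preallocated chain block from
 * h->cmd_sg_list[], which is DMA-mapped only for the lifetime of the
 * command and unmapped again on completion.
 */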
static void hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = HPSA_SG_CHAIN;
	chain_sg->Len = sizeof(*chain_sg) *
		(c->Header.SGTotal - h->max_cmd_sg_entries);
	temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
		PCI_DMA_TODEVICE);
	chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
	chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;
	union u64bit temp64;

	if (c->Header.SGTotal <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	temp64.val32.lower = chain_sg->Addr.lower;
	temp64.val32.upper = chain_sg->Addr.upper;
	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}

static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;

	unsigned char sense_key;
	unsigned char asc;      /* additional sense code */
	unsigned char ascq;     /* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
	h = cp->h;

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if (cp->Header.SGTotal > h->max_cmd_sg_entries)
		hpsa_unmap_sg_chain_block(h, cp);

	cmd->result = (DID_OK << 16); 		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
	cmd->result |= ei->ScsiStatus;

	/* copy the sense data whether we need to or not. */
	if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
		sense_data_size = SCSI_SENSE_BUFFERSIZE;
	else
		sense_data_size = sizeof(ei->SenseInfo);
	if (ei->SenseLen < sense_data_size)
		sense_data_size = ei->SenseLen;

	memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
	scsi_set_resid(cmd, ei->ResidualCnt);

	if (ei->CommandStatus == 0) {
		cmd->scsi_done(cmd);
		cmd_free(h, cp);
		return;
	}

	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		if (ei->ScsiStatus) {
			/* Get sense key */
			sense_key = 0xf & ei->SenseInfo[2];
			/* Get additional sense code */
			asc = ei->SenseInfo[12];
			/* Get additional sense code qualifier */
			ascq = ei->SenseInfo[13];
		}

		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			if (check_for_unit_attention(h, cp)) {
				cmd->result = DID_SOFT_ERROR << 16;
				break;
			}
			if (sense_key == ILLEGAL_REQUEST) {
				/*
				 * SCSI REPORT_LUNS is commonly unsupported on
				 * Smart Array.  Suppress noisy complaint.
				 */
				if (cp->Request.CDB[0] == REPORT_LUNS)
					break;

				/* If ASC/ASCQ indicate Logical Unit
				 * Not Supported condition,
				 */
				if ((asc == 0x25) && (ascq == 0x0)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition\n", cp);
					break;
				}
			}

			if (sense_key == NOT_READY) {
				/* If Sense is Not Ready, Logical Unit
				 * Not ready, Manual Intervention
				 * required
				 */
				if ((asc == 0x04) && (ascq == 0x03)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition: unit "
						"not ready, manual "
						"intervention required\n", cp);
					break;
				}
			}
			if (sense_key == ABORTED_COMMAND) {
				/* Aborted command is retryable */
				dev_warn(&h->pdev->dev, "cp %p "
					"has check condition: aborted command: "
					"ASC: 0x%x, ASCQ: 0x%x\n",
					cp, asc, ascq);
				cmd->result = DID_SOFT_ERROR << 16;
				break;
			}
			/* Must be some other type of check condition */
			dev_warn(&h->pdev->dev, "cp %p has check condition: "
					"unknown type: "
					"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
					"Returning result: 0x%x, "
					"cmd=[%02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x]\n",
					cp, sense_key, asc, ascq,
					cmd->result,
					cmd->cmnd[0], cmd->cmnd[1],
					cmd->cmnd[2], cmd->cmnd[3],
					cmd->cmnd[4], cmd->cmnd[5],
					cmd->cmnd[6], cmd->cmnd[7],
					cmd->cmnd[8], cmd->cmnd[9],
					cmd->cmnd[10], cmd->cmnd[11],
					cmd->cmnd[12], cmd->cmnd[13],
					cmd->cmnd[14], cmd->cmnd[15]);
			break;
		}


		/* Problem was not a check condition
		 * Pass it up to the upper layers...
		 */
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
		} else {  /* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);

			/* Ordinarily, this case should never happen,
			 * but there is a bug in some released firmware
			 * revisions that allows it to happen if, for
			 * example, a 4100 backplane loses power and
			 * the tape drive is in it.  We assume that
			 * it's a fatal error of some kind because we
			 * can't show that it wasn't.  We will make it
			 * look like selection timeout since that is
			 * the most common reason for this to occur,
			 * and it's severe enough.
			 */

			cmd->result = DID_NO_CONNECT << 16;
		}
		break;

	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev, "cp %p has"
			" completed with data overrun "
			"reported\n", cp);
		break;
	case CMD_INVALID: {
		/* print_bytes(cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response).  You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target. */
		cmd->result = DID_NO_CONNECT << 16;
	}
		break;
	case CMD_PROTOCOL_ERR:
		dev_warn(&h->pdev->dev, "cp %p has "
			"protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		cmd->result = DID_ABORT << 16;
		dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
			cp, ei->ScsiStatus);
		break;
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
		dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
			"abort\n", cp);
		break;
	case CMD_TIMEOUT:
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "Command unabortable\n");
		break;
	default:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
				cp, ei->CommandStatus);
	}
	cmd->scsi_done(cmd);
	cmd_free(h, cp);
}

static int hpsa_scsi_detect(struct ctlr_info *h)
{
	struct Scsi_Host *sh;
	int error;

	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
	if (sh == NULL)
		goto fail;

	sh->io_port = 0;
	sh->n_io_port = 0;
	sh->this_id = -1;
	sh->max_channel = 3;
	sh->max_cmd_len = MAX_COMMAND_SIZE;
	sh->max_lun = HPSA_MAX_LUN;
	sh->max_id = HPSA_MAX_LUN;
	sh->can_queue = h->nr_cmds;
	sh->cmd_per_lun = h->nr_cmds;
	sh->sg_tablesize = h->maxsgentries;
	h->scsi_host = sh;
	sh->hostdata[0] = (unsigned long) h;
	sh->irq = h->intr[h->intr_mode];
	sh->unique_id = sh->irq;
	error = scsi_add_host(sh, &h->pdev->dev);
	if (error)
		goto fail_host_put;
	scsi_scan_host(sh);
	return 0;

fail_host_put:
	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
		" failed for controller %d\n", h->ctlr);
	scsi_host_put(sh);
	return error;
fail:
	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
		" failed for controller %d\n", h->ctlr);
	return -ENOMEM;
}

static void hpsa_pci_unmap(struct pci_dev *pdev,
	struct CommandList *c, int sg_used, int data_direction)
{
	int i;
	union u64bit addr64;

	for (i = 0; i < sg_used; i++) {
		addr64.val32.lower = c->SG[i].Addr.lower;
		addr64.val32.upper = c->SG[i].Addr.upper;
		pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
			data_direction);
	}
}

static void hpsa_map_one(struct pci_dev *pdev,
		struct CommandList *cp,
		unsigned char *buf,
		size_t buflen,
		int data_direction)
{
	u64 addr64;

	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
		cp->Header.SGList = 0;
		cp->Header.SGTotal = 0;
		return;
	}

	addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
	cp->SG[0].Addr.lower =
		(u32) (addr64 & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Addr.upper =
		(u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Len = buflen;
	cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
}

static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	enqueue_cmd_and_start_io(h, c);
	wait_for_completion(&wait);
}

static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction)
{
	int retry_count = 0;

	do {
		memset(c->err_info, 0, sizeof(*c->err_info));
		hpsa_scsi_do_simple_cmd_core(h, c);
		retry_count++;
	} while (check_for_unit_attention(h, c) && retry_count <= 3);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}

static void hpsa_scsi_interpret_error(struct CommandList *cp)
{
	struct ErrorInfo *ei;
	struct device *d = &cp->h->pdev->dev;

	ei = cp->err_info;
	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		dev_warn(d, "cmd %p has completed with errors\n", cp);
		dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
				ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero.  "
			"(probably indicates selection timeout "
			"reported incorrectly due to a known "
			"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		dev_info(d, "UNDERRUN\n");
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(d, "cp %p has completed with data overrun\n", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		dev_warn(d, "cp %p is reported invalid (probably means "
			"target device no longer present)\n", cp);
		/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		}
		break;
	case CMD_PROTOCOL_ERR:
		dev_warn(d, "cp %p has protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		/* cmd->result = DID_ERROR << 16; */
		dev_warn(d, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		dev_warn(d, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		dev_warn(d, "cp %p was aborted\n", cp);
		break;
	case CMD_ABORT_FAILED:
		dev_warn(d, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
		break;
	case CMD_TIMEOUT:
		dev_warn(d, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		dev_warn(d, "Command unabortable\n");
		break;
	default:
		dev_warn(d, "cp %p returned unknown status %x\n", cp,
				ei->CommandStatus);
	}
}

static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
			unsigned char page, unsigned char *buf,
			unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) {			/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) {			/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
	hpsa_scsi_do_simple_cmd_core(h, c);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
	kfree(buf);
	return;
}

/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int buflen)
{
	int rc;
	unsigned char *buf;

	if (buflen > 16)
		buflen = 16;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -1;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
	if (rc == 0)
		memcpy(device_id, &buf[8], buflen);
	kfree(buf);
	return rc != 0;
}

static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
		struct ReportLUNdata *buf, int bufsize,
		int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);
	if (c == NULL) {			/* trouble... */
		dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -1;
	}
	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD);
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf,
		int bufsize, int extended_response)
{
	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}

static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

	unsigned char *inq_buff;
	unsigned char *obdr_sig;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff)
		goto bail_out;

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		/* Inquiry failed (msg printed already) */
		dev_err(&h->pdev->dev,
			"hpsa_update_device_info: inquiry failed\n");
		goto bail_out;
	}

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
		sizeof(this_device->device_id));

	if (this_device->devtype == TYPE_DISK &&
		is_logical_dev_addr_mode(scsi3addr))
		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
	else
		this_device->raid_level = RAID_UNKNOWN;

	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 in inquiry data.
		 */
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
					strncmp(obdr_sig, OBDR_TAPE_SIG,
						OBDR_SIG_LEN) == 0);
	}

	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return 1;
}

static unsigned char *msa2xxx_model[] = {
	"MSA2012",
	"MSA2024",
	"MSA2312",
	"MSA2324",
	"P2000 G3 SAS",
	NULL,
};

static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int i;

	for (i = 0; msa2xxx_model[i]; i++)
		if (strncmp(device->model, msa2xxx_model[i],
			strlen(msa2xxx_model[i])) == 0)
			return 1;
	return 0;
}

/* Helper function to assign bus, target, lun mapping of devices.
 * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
 * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, int *bus, int *target, int *lun,
	struct hpsa_scsi_dev_t *device)
{
	u32 lunid;

	if (is_logical_dev_addr_mode(lunaddrbytes)) {
		/* logical device */
		if (unlikely(is_scsi_rev_5(h))) {
			/* p1210m, logical drives lun assignments
			 * match SCSI REPORT LUNS data.
			 */
			lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
			*bus = 0;
			*target = 0;
			*lun = (lunid & 0x3fff) + 1;
		} else {
			/* not p1210m... */
			lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
			if (is_msa2xxx(h, device)) {
				/* msa2xxx way, put logicals on bus 1
				 * and match target/lun numbers box
				 * reports.
				 */
				*bus = 1;
				*target = (lunid >> 16) & 0x3fff;
				*lun = lunid & 0x00ff;
			} else {
				/* Traditional smart array way. */
				*bus = 0;
				*lun = 0;
				*target = lunid & 0x3fff;
			}
		}
	} else {
		/* physical device */
		if (is_hba_lunid(lunaddrbytes))
			if (unlikely(is_scsi_rev_5(h))) {
				*bus = 0; /* put p1210m ctlr at 0,0,0 */
				*target = 0;
				*lun = 0;
				return;
			} else
				*bus = 3; /* traditional smartarray */
		else
			*bus = 2; /* physical disk */
		*target = -1;
		*lun = -1; /* we will fill these in later. */
	}
}
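
/*
 * Decoding example (for illustration) for the msa2xxx branch above: a
 * logical lunid of 0x00020005 yields target = (0x00020005 >> 16) & 0x3fff
 * = 2 and lun = 0x00020005 & 0x00ff = 5, i.e. the box's own target/lun
 * numbering is preserved on bus 1.
 */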

/*
 * If there is no lun 0 on a target, linux won't find any devices.
 * For the MSA2xxx boxes, we have to manually detect the enclosure
 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
 * it for some reason.  *tmpdevice is the target we're adding,
 * this_device is a pointer into the current element of currentsd[]
 * that we're building up in update_scsi_devices(), below.
 * lunzerobits is a bitmap that tracks which targets already have a
 * lun 0 assigned.
 * Returns 1 if an enclosure was added, 0 if not.
 */
static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *tmpdevice,
	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
	int bus, int target, int lun, unsigned long lunzerobits[],
	int *nmsa2xxx_enclosures)
{
	unsigned char scsi3addr[8];

	if (test_bit(target, lunzerobits))
		return 0; /* There is already a lun 0 on this target. */

	if (!is_logical_dev_addr_mode(lunaddrbytes))
		return 0; /* It's the logical targets that may lack lun 0. */

	if (!is_msa2xxx(h, tmpdevice))
		return 0; /* It's only the MSA2xxx that have this problem. */

	if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
		return 0;

	memset(scsi3addr, 0, 8);
	scsi3addr[3] = target;
	if (is_hba_lunid(scsi3addr))
		return 0; /* Don't add the RAID controller here. */

	if (is_scsi_rev_5(h))
		return 0; /* p1210m doesn't need to do this. */

#define MAX_MSA2XXX_ENCLOSURES 32
	if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
		dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
			"enclosures exceeded.  Check your hardware "
			"configuration.");
		return 0;
	}

	if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
		return 0;
	(*nmsa2xxx_enclosures)++;
	hpsa_set_bus_target_lun(this_device, bus, target, 0);
	set_bit(target, lunzerobits);
	return 1;
}

/*
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
 * logdev.  The number of luns in physdev and logdev are returned in
 * *nphysicals and *nlogicals, respectively.
 * Returns 0 on success, -1 otherwise.
 */
static int hpsa_gather_lun_info(struct ctlr_info *h,
	int reportlunsize,
	struct ReportLUNdata *physdev, u32 *nphysicals,
	struct ReportLUNdata *logdev, u32 *nlogicals)
{
	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
			"  %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded.  "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
		*nlogicals = HPSA_MAX_LUN;
	}
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}
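
/*
 * Index-mapping example (added for orientation): figure_lunaddrbytes()
 * below linearizes the scan order used by hpsa_update_scsi_devices().
 * Index raid_ctlr_position is the controller itself (RAID_CTLR_LUNID);
 * otherwise physical LUNs come first, then logical LUNs.  E.g. with
 * raid_ctlr_position == 0 (SCSI rev 5 / p1210m), index i maps to
 * physdev_list->LUN[i - 1] for 1 <= i <= nphysicals.
 */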
" 1785 "%d LUNs ignored.\n", HPSA_MAX_LUN, 1786 *nlogicals - HPSA_MAX_LUN); 1787 *nlogicals = HPSA_MAX_LUN; 1788 } 1789 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) { 1790 dev_warn(&h->pdev->dev, 1791 "maximum logical + physical LUNs (%d) exceeded. " 1792 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, 1793 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN); 1794 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals; 1795 } 1796 return 0; 1797 } 1798 1799 u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, 1800 int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list, 1801 struct ReportLUNdata *logdev_list) 1802 { 1803 /* Helper function, figure out where the LUN ID info is coming from 1804 * given index i, lists of physical and logical devices, where in 1805 * the list the raid controller is supposed to appear (first or last) 1806 */ 1807 1808 int logicals_start = nphysicals + (raid_ctlr_position == 0); 1809 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0); 1810 1811 if (i == raid_ctlr_position) 1812 return RAID_CTLR_LUNID; 1813 1814 if (i < logicals_start) 1815 return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0]; 1816 1817 if (i < last_device) 1818 return &logdev_list->LUN[i - nphysicals - 1819 (raid_ctlr_position == 0)][0]; 1820 BUG(); 1821 return NULL; 1822 } 1823 1824 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) 1825 { 1826 /* the idea here is we could get notified 1827 * that some devices have changed, so we do a report 1828 * physical luns and report logical luns cmd, and adjust 1829 * our list of devices accordingly. 1830 * 1831 * The scsi3addr's of devices won't change so long as the 1832 * adapter is not reset. That means we can rescan and 1833 * tell which devices we already know about, vs. new 1834 * devices, vs. disappearing devices. 1835 */ 1836 struct ReportLUNdata *physdev_list = NULL; 1837 struct ReportLUNdata *logdev_list = NULL; 1838 u32 nphysicals = 0; 1839 u32 nlogicals = 0; 1840 u32 ndev_allocated = 0; 1841 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; 1842 int ncurrent = 0; 1843 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8; 1844 int i, nmsa2xxx_enclosures, ndevs_to_allocate; 1845 int bus, target, lun; 1846 int raid_ctlr_position; 1847 DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR); 1848 1849 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA, 1850 GFP_KERNEL); 1851 physdev_list = kzalloc(reportlunsize, GFP_KERNEL); 1852 logdev_list = kzalloc(reportlunsize, GFP_KERNEL); 1853 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); 1854 1855 if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) { 1856 dev_err(&h->pdev->dev, "out of memory\n"); 1857 goto out; 1858 } 1859 memset(lunzerobits, 0, sizeof(lunzerobits)); 1860 1861 if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals, 1862 logdev_list, &nlogicals)) 1863 goto out; 1864 1865 /* We might see up to 32 MSA2xxx enclosures, actually 8 of them 1866 * but each of them 4 times through different paths. The plus 1 1867 * is for the RAID controller. 
1868 */ 1869 ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1; 1870 1871 /* Allocate the per device structures */ 1872 for (i = 0; i < ndevs_to_allocate; i++) { 1873 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL); 1874 if (!currentsd[i]) { 1875 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n", 1876 __FILE__, __LINE__); 1877 goto out; 1878 } 1879 ndev_allocated++; 1880 } 1881 1882 if (unlikely(is_scsi_rev_5(h))) 1883 raid_ctlr_position = 0; 1884 else 1885 raid_ctlr_position = nphysicals + nlogicals; 1886 1887 /* adjust our table of devices */ 1888 nmsa2xxx_enclosures = 0; 1889 for (i = 0; i < nphysicals + nlogicals + 1; i++) { 1890 u8 *lunaddrbytes, is_OBDR = 0; 1891 1892 /* Figure out where the LUN ID info is coming from */ 1893 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, 1894 i, nphysicals, nlogicals, physdev_list, logdev_list); 1895 /* skip masked physical devices. */ 1896 if (lunaddrbytes[3] & 0xC0 && 1897 i < nphysicals + (raid_ctlr_position == 0)) 1898 continue; 1899 1900 /* Get device type, vendor, model, device id */ 1901 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice, 1902 &is_OBDR)) 1903 continue; /* skip it if we can't talk to it. */ 1904 figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun, 1905 tmpdevice); 1906 this_device = currentsd[ncurrent]; 1907 1908 /* 1909 * For the msa2xxx boxes, we have to insert a LUN 0 which 1910 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there 1911 * is nonetheless an enclosure device there. We have to 1912 * present that otherwise linux won't find anything if 1913 * there is no lun 0. 1914 */ 1915 if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device, 1916 lunaddrbytes, bus, target, lun, lunzerobits, 1917 &nmsa2xxx_enclosures)) { 1918 ncurrent++; 1919 this_device = currentsd[ncurrent]; 1920 } 1921 1922 *this_device = *tmpdevice; 1923 hpsa_set_bus_target_lun(this_device, bus, target, lun); 1924 1925 switch (this_device->devtype) { 1926 case TYPE_ROM: 1927 /* We don't *really* support actual CD-ROM devices, 1928 * just "One Button Disaster Recovery" tape drive 1929 * which temporarily pretends to be a CD-ROM drive. 1930 * So we check that the device is really an OBDR tape 1931 * device by checking for "$DR-10" in bytes 43-48 of 1932 * the inquiry data. 1933 */ 1934 if (is_OBDR) 1935 ncurrent++; 1936 break; 1937 case TYPE_DISK: 1938 if (i < nphysicals) 1939 break; 1940 ncurrent++; 1941 break; 1942 case TYPE_TAPE: 1943 case TYPE_MEDIUM_CHANGER: 1944 ncurrent++; 1945 break; 1946 case TYPE_RAID: 1947 /* Only present the Smartarray HBA as a RAID controller. 1948 * If it's a RAID controller other than the HBA itself 1949 * (an external RAID controller, MSA500 or similar) 1950 * don't present it. 1951 */ 1952 if (!is_hba_lunid(lunaddrbytes)) 1953 break; 1954 ncurrent++; 1955 break; 1956 default: 1957 break; 1958 } 1959 if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA) 1960 break; 1961 } 1962 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); 1963 out: 1964 kfree(tmpdevice); 1965 for (i = 0; i < ndev_allocated; i++) 1966 kfree(currentsd[i]); 1967 kfree(currentsd); 1968 kfree(physdev_list); 1969 kfree(logdev_list); 1970 } 1971 1972 /* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci 1973 * dma mapping and fills in the scatter gather entries of the 1974 * hpsa command, cp. 
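* When a request needs more descriptors than fit in the command
* (use_sg > h->max_cmd_sg_entries), the loop below spills into the
* command's pre-allocated chain block and hpsa_map_sg_chain_block()
* wires the last in-command descriptor to point at it. Illustrative
* numbers (not from the hardware spec): with max_cmd_sg_entries == 32
* and use_sg == 40, entries 0-30 stay in cp->SG[], entries 31-39 go
* to the chain block, Header.SGList becomes 32 and Header.SGTotal
* becomes 41 (use_sg plus one for the chain descriptor).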
1975 */ 1976 static int hpsa_scatter_gather(struct ctlr_info *h, 1977 struct CommandList *cp, 1978 struct scsi_cmnd *cmd) 1979 { 1980 unsigned int len; 1981 struct scatterlist *sg; 1982 u64 addr64; 1983 int use_sg, i, sg_index, chained; 1984 struct SGDescriptor *curr_sg; 1985 1986 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); 1987 1988 use_sg = scsi_dma_map(cmd); 1989 if (use_sg < 0) 1990 return use_sg; 1991 1992 if (!use_sg) 1993 goto sglist_finished; 1994 1995 curr_sg = cp->SG; 1996 chained = 0; 1997 sg_index = 0; 1998 scsi_for_each_sg(cmd, sg, use_sg, i) { 1999 if (i == h->max_cmd_sg_entries - 1 && 2000 use_sg > h->max_cmd_sg_entries) { 2001 chained = 1; 2002 curr_sg = h->cmd_sg_list[cp->cmdindex]; 2003 sg_index = 0; 2004 } 2005 addr64 = (u64) sg_dma_address(sg); 2006 len = sg_dma_len(sg); 2007 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); 2008 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); 2009 curr_sg->Len = len; 2010 curr_sg->Ext = 0; /* we are not chaining */ 2011 curr_sg++; 2012 } 2013 2014 if (use_sg + chained > h->maxSG) 2015 h->maxSG = use_sg + chained; 2016 2017 if (chained) { 2018 cp->Header.SGList = h->max_cmd_sg_entries; 2019 cp->Header.SGTotal = (u16) (use_sg + 1); 2020 hpsa_map_sg_chain_block(h, cp); 2021 return 0; 2022 } 2023 2024 sglist_finished: 2025 2026 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */ 2027 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */ 2028 return 0; 2029 } 2030 2031 2032 static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, 2033 void (*done)(struct scsi_cmnd *)) 2034 { 2035 struct ctlr_info *h; 2036 struct hpsa_scsi_dev_t *dev; 2037 unsigned char scsi3addr[8]; 2038 struct CommandList *c; 2039 unsigned long flags; 2040 2041 /* Get the ptr to our adapter structure out of cmd->host. */ 2042 h = sdev_to_hba(cmd->device); 2043 dev = cmd->device->hostdata; 2044 if (!dev) { 2045 cmd->result = DID_NO_CONNECT << 16; 2046 done(cmd); 2047 return 0; 2048 } 2049 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); 2050 2051 /* Need a lock as this is being allocated from the pool */ 2052 spin_lock_irqsave(&h->lock, flags); 2053 c = cmd_alloc(h); 2054 spin_unlock_irqrestore(&h->lock, flags); 2055 if (c == NULL) { /* trouble... */ 2056 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); 2057 return SCSI_MLQUEUE_HOST_BUSY; 2058 } 2059 2060 /* Fill in the command list header */ 2061 2062 cmd->scsi_done = done; /* save this for use by completion code */ 2063 2064 /* save c in case we have to abort it */ 2065 cmd->host_scribble = (unsigned char *) c; 2066 2067 c->cmd_type = CMD_SCSI; 2068 c->scsi_cmd = cmd; 2069 c->Header.ReplyQueue = 0; /* unused in simple mode */ 2070 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); 2071 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT); 2072 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT; 2073 2074 /* Fill in the request block... 
*/ 2075 2076 c->Request.Timeout = 0; 2077 memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); 2078 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); 2079 c->Request.CDBLen = cmd->cmd_len; 2080 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); 2081 c->Request.Type.Type = TYPE_CMD; 2082 c->Request.Type.Attribute = ATTR_SIMPLE; 2083 switch (cmd->sc_data_direction) { 2084 case DMA_TO_DEVICE: 2085 c->Request.Type.Direction = XFER_WRITE; 2086 break; 2087 case DMA_FROM_DEVICE: 2088 c->Request.Type.Direction = XFER_READ; 2089 break; 2090 case DMA_NONE: 2091 c->Request.Type.Direction = XFER_NONE; 2092 break; 2093 case DMA_BIDIRECTIONAL: 2094 /* This can happen if a buggy application does a scsi passthru 2095 * and sets both inlen and outlen to non-zero. ( see 2096 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) 2097 */ 2098 2099 c->Request.Type.Direction = XFER_RSVD; 2100 /* This is technically wrong, and hpsa controllers should 2101 * reject it with CMD_INVALID, which is the most correct 2102 * response, but non-fibre backends appear to let it 2103 * slide by, and give the same results as if this field 2104 * were set correctly. Either way is acceptable for 2105 * our purposes here. 2106 */ 2107 2108 break; 2109 2110 default: 2111 dev_err(&h->pdev->dev, "unknown data direction: %d\n", 2112 cmd->sc_data_direction); 2113 BUG(); 2114 break; 2115 } 2116 2117 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ 2118 cmd_free(h, c); 2119 return SCSI_MLQUEUE_HOST_BUSY; 2120 } 2121 enqueue_cmd_and_start_io(h, c); 2122 /* the cmd'll come back via intr handler in complete_scsi_command() */ 2123 return 0; 2124 } 2125 2126 static DEF_SCSI_QCMD(hpsa_scsi_queue_command) 2127 2128 static void hpsa_scan_start(struct Scsi_Host *sh) 2129 { 2130 struct ctlr_info *h = shost_to_hba(sh); 2131 unsigned long flags; 2132 2133 /* wait until any scan already in progress is finished. */ 2134 while (1) { 2135 spin_lock_irqsave(&h->scan_lock, flags); 2136 if (h->scan_finished) 2137 break; 2138 spin_unlock_irqrestore(&h->scan_lock, flags); 2139 wait_event(h->scan_wait_queue, h->scan_finished); 2140 /* Note: We don't need to worry about a race between this 2141 * thread and driver unload because the midlayer will 2142 * have incremented the reference count, so unload won't 2143 * happen if we're in here. 2144 */ 2145 } 2146 h->scan_finished = 0; /* mark scan as in progress */ 2147 spin_unlock_irqrestore(&h->scan_lock, flags); 2148 2149 hpsa_update_scsi_devices(h, h->scsi_host->host_no); 2150 2151 spin_lock_irqsave(&h->scan_lock, flags); 2152 h->scan_finished = 1; /* mark scan as finished. 
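* hpsa_scan_start() rechecks this flag under scan_lock before
* starting a new scan, and hpsa_scan_finished() samples it under
* the same lock on behalf of the midlayer.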
*/ 2153 wake_up_all(&h->scan_wait_queue); 2154 spin_unlock_irqrestore(&h->scan_lock, flags); 2155 } 2156 2157 static int hpsa_scan_finished(struct Scsi_Host *sh, 2158 unsigned long elapsed_time) 2159 { 2160 struct ctlr_info *h = shost_to_hba(sh); 2161 unsigned long flags; 2162 int finished; 2163 2164 spin_lock_irqsave(&h->scan_lock, flags); 2165 finished = h->scan_finished; 2166 spin_unlock_irqrestore(&h->scan_lock, flags); 2167 return finished; 2168 } 2169 2170 static int hpsa_change_queue_depth(struct scsi_device *sdev, 2171 int qdepth, int reason) 2172 { 2173 struct ctlr_info *h = sdev_to_hba(sdev); 2174 2175 if (reason != SCSI_QDEPTH_DEFAULT) 2176 return -ENOTSUPP; 2177 2178 if (qdepth < 1) 2179 qdepth = 1; 2180 else 2181 if (qdepth > h->nr_cmds) 2182 qdepth = h->nr_cmds; 2183 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 2184 return sdev->queue_depth; 2185 } 2186 2187 static void hpsa_unregister_scsi(struct ctlr_info *h) 2188 { 2189 /* we are being forcibly unloaded, and may not refuse. */ 2190 scsi_remove_host(h->scsi_host); 2191 scsi_host_put(h->scsi_host); 2192 h->scsi_host = NULL; 2193 } 2194 2195 static int hpsa_register_scsi(struct ctlr_info *h) 2196 { 2197 int rc; 2198 2199 rc = hpsa_scsi_detect(h); 2200 if (rc != 0) 2201 dev_err(&h->pdev->dev, "hpsa_register_scsi: failed" 2202 " hpsa_scsi_detect(), rc is %d\n", rc); 2203 return rc; 2204 } 2205 2206 static int wait_for_device_to_become_ready(struct ctlr_info *h, 2207 unsigned char lunaddr[]) 2208 { 2209 int rc = 0; 2210 int count = 0; 2211 int waittime = 1; /* seconds */ 2212 struct CommandList *c; 2213 2214 c = cmd_special_alloc(h); 2215 if (!c) { 2216 dev_warn(&h->pdev->dev, "out of memory in " 2217 "wait_for_device_to_become_ready.\n"); 2218 return IO_ERROR; 2219 } 2220 2221 /* Send test unit ready until device ready, or give up. */ 2222 while (count < HPSA_TUR_RETRY_LIMIT) { 2223 2224 /* Wait for a bit. do this first, because if we send 2225 * the TUR right away, the reset will just abort it. 2226 */ 2227 msleep(1000 * waittime); 2228 count++; 2229 2230 /* Increase wait time with each try, up to a point. */ 2231 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) 2232 waittime = waittime * 2; 2233 2234 /* Send the Test Unit Ready */ 2235 fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD); 2236 hpsa_scsi_do_simple_cmd_core(h, c); 2237 /* no unmap needed here because no data xfer. */ 2238 2239 if (c->err_info->CommandStatus == CMD_SUCCESS) 2240 break; 2241 2242 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 2243 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && 2244 (c->err_info->SenseInfo[2] == NO_SENSE || 2245 c->err_info->SenseInfo[2] == UNIT_ATTENTION)) 2246 break; 2247 2248 dev_warn(&h->pdev->dev, "waiting %d secs " 2249 "for device to become ready.\n", waittime); 2250 rc = 1; /* device not ready. */ 2251 } 2252 2253 if (rc) 2254 dev_warn(&h->pdev->dev, "giving up on device.\n"); 2255 else 2256 dev_warn(&h->pdev->dev, "device is ready.\n"); 2257 2258 cmd_special_free(h, c); 2259 return rc; 2260 } 2261 2262 /* Need at least one of these error handlers to keep ../scsi/hosts.c from 2263 * complaining. Doing a host- or bus-reset can't do anything good here. 
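* The handler below therefore only sends a lun reset via
* hpsa_send_reset() and then polls the device with Test Unit Ready
* (wait_for_device_to_become_ready) before reporting SUCCESS.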
2264 */ 2265 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) 2266 { 2267 int rc; 2268 struct ctlr_info *h; 2269 struct hpsa_scsi_dev_t *dev; 2270 2271 /* find the controller to which the command to be aborted was sent */ 2272 h = sdev_to_hba(scsicmd->device); 2273 if (h == NULL) /* paranoia */ 2274 return FAILED; 2275 dev = scsicmd->device->hostdata; 2276 if (!dev) { 2277 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " 2278 "device lookup failed.\n"); 2279 return FAILED; 2280 } 2281 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n", 2282 h->scsi_host->host_no, dev->bus, dev->target, dev->lun); 2283 /* send a reset to the SCSI LUN which the command was sent to */ 2284 rc = hpsa_send_reset(h, dev->scsi3addr); 2285 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) 2286 return SUCCESS; 2287 2288 dev_warn(&h->pdev->dev, "resetting device failed.\n"); 2289 return FAILED; 2290 } 2291 2292 /* 2293 * For operations that cannot sleep, a command block is allocated at init, 2294 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track 2295 * which ones are free or in use. Lock must be held when calling this. 2296 * cmd_free() is the complement. 2297 */ 2298 static struct CommandList *cmd_alloc(struct ctlr_info *h) 2299 { 2300 struct CommandList *c; 2301 int i; 2302 union u64bit temp64; 2303 dma_addr_t cmd_dma_handle, err_dma_handle; 2304 2305 do { 2306 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); 2307 if (i == h->nr_cmds) 2308 return NULL; 2309 } while (test_and_set_bit 2310 (i & (BITS_PER_LONG - 1), 2311 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); 2312 c = h->cmd_pool + i; 2313 memset(c, 0, sizeof(*c)); 2314 cmd_dma_handle = h->cmd_pool_dhandle 2315 + i * sizeof(*c); 2316 c->err_info = h->errinfo_pool + i; 2317 memset(c->err_info, 0, sizeof(*c->err_info)); 2318 err_dma_handle = h->errinfo_pool_dhandle 2319 + i * sizeof(*c->err_info); 2320 h->nr_allocs++; 2321 2322 c->cmdindex = i; 2323 2324 INIT_LIST_HEAD(&c->list); 2325 c->busaddr = (u32) cmd_dma_handle; 2326 temp64.val = (u64) err_dma_handle; 2327 c->ErrDesc.Addr.lower = temp64.val32.lower; 2328 c->ErrDesc.Addr.upper = temp64.val32.upper; 2329 c->ErrDesc.Len = sizeof(*c->err_info); 2330 2331 c->h = h; 2332 return c; 2333 } 2334 2335 /* For operations that can wait for kmalloc to possibly sleep, 2336 * this routine can be called. Lock need not be held to call 2337 * cmd_special_alloc. cmd_special_free() is the complement. 
2338 */ 2339 static struct CommandList *cmd_special_alloc(struct ctlr_info *h) 2340 { 2341 struct CommandList *c; 2342 union u64bit temp64; 2343 dma_addr_t cmd_dma_handle, err_dma_handle; 2344 2345 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle); 2346 if (c == NULL) 2347 return NULL; 2348 memset(c, 0, sizeof(*c)); 2349 2350 c->cmdindex = -1; 2351 2352 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info), 2353 &err_dma_handle); 2354 2355 if (c->err_info == NULL) { 2356 pci_free_consistent(h->pdev, 2357 sizeof(*c), c, cmd_dma_handle); 2358 return NULL; 2359 } 2360 memset(c->err_info, 0, sizeof(*c->err_info)); 2361 2362 INIT_LIST_HEAD(&c->list); 2363 c->busaddr = (u32) cmd_dma_handle; 2364 temp64.val = (u64) err_dma_handle; 2365 c->ErrDesc.Addr.lower = temp64.val32.lower; 2366 c->ErrDesc.Addr.upper = temp64.val32.upper; 2367 c->ErrDesc.Len = sizeof(*c->err_info); 2368 2369 c->h = h; 2370 return c; 2371 } 2372 2373 static void cmd_free(struct ctlr_info *h, struct CommandList *c) 2374 { 2375 int i; 2376 2377 i = c - h->cmd_pool; 2378 clear_bit(i & (BITS_PER_LONG - 1), 2379 h->cmd_pool_bits + (i / BITS_PER_LONG)); 2380 h->nr_frees++; 2381 } 2382 2383 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c) 2384 { 2385 union u64bit temp64; 2386 2387 temp64.val32.lower = c->ErrDesc.Addr.lower; 2388 temp64.val32.upper = c->ErrDesc.Addr.upper; 2389 pci_free_consistent(h->pdev, sizeof(*c->err_info), 2390 c->err_info, (dma_addr_t) temp64.val); 2391 pci_free_consistent(h->pdev, sizeof(*c), 2392 c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK)); 2393 } 2394 2395 #ifdef CONFIG_COMPAT 2396 2397 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) 2398 { 2399 IOCTL32_Command_struct __user *arg32 = 2400 (IOCTL32_Command_struct __user *) arg; 2401 IOCTL_Command_struct arg64; 2402 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); 2403 int err; 2404 u32 cp; 2405 2406 memset(&arg64, 0, sizeof(arg64)); 2407 err = 0; 2408 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 2409 sizeof(arg64.LUN_info)); 2410 err |= copy_from_user(&arg64.Request, &arg32->Request, 2411 sizeof(arg64.Request)); 2412 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 2413 sizeof(arg64.error_info)); 2414 err |= get_user(arg64.buf_size, &arg32->buf_size); 2415 err |= get_user(cp, &arg32->buf); 2416 arg64.buf = compat_ptr(cp); 2417 err |= copy_to_user(p, &arg64, sizeof(arg64)); 2418 2419 if (err) 2420 return -EFAULT; 2421 2422 err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p); 2423 if (err) 2424 return err; 2425 err |= copy_in_user(&arg32->error_info, &p->error_info, 2426 sizeof(arg32->error_info)); 2427 if (err) 2428 return -EFAULT; 2429 return err; 2430 } 2431 2432 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, 2433 int cmd, void *arg) 2434 { 2435 BIG_IOCTL32_Command_struct __user *arg32 = 2436 (BIG_IOCTL32_Command_struct __user *) arg; 2437 BIG_IOCTL_Command_struct arg64; 2438 BIG_IOCTL_Command_struct __user *p = 2439 compat_alloc_user_space(sizeof(arg64)); 2440 int err; 2441 u32 cp; 2442 2443 memset(&arg64, 0, sizeof(arg64)); 2444 err = 0; 2445 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 2446 sizeof(arg64.LUN_info)); 2447 err |= copy_from_user(&arg64.Request, &arg32->Request, 2448 sizeof(arg64.Request)); 2449 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 2450 sizeof(arg64.error_info)); 2451 err |= get_user(arg64.buf_size, &arg32->buf_size); 2452 err |= get_user(arg64.malloc_size, 
&arg32->malloc_size); 2453 err |= get_user(cp, &arg32->buf); 2454 arg64.buf = compat_ptr(cp); 2455 err |= copy_to_user(p, &arg64, sizeof(arg64)); 2456 2457 if (err) 2458 return -EFAULT; 2459 2460 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p); 2461 if (err) 2462 return err; 2463 err |= copy_in_user(&arg32->error_info, &p->error_info, 2464 sizeof(arg32->error_info)); 2465 if (err) 2466 return -EFAULT; 2467 return err; 2468 } 2469 2470 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg) 2471 { 2472 switch (cmd) { 2473 case CCISS_GETPCIINFO: 2474 case CCISS_GETINTINFO: 2475 case CCISS_SETINTINFO: 2476 case CCISS_GETNODENAME: 2477 case CCISS_SETNODENAME: 2478 case CCISS_GETHEARTBEAT: 2479 case CCISS_GETBUSTYPES: 2480 case CCISS_GETFIRMVER: 2481 case CCISS_GETDRIVVER: 2482 case CCISS_REVALIDVOLS: 2483 case CCISS_DEREGDISK: 2484 case CCISS_REGNEWDISK: 2485 case CCISS_REGNEWD: 2486 case CCISS_RESCANDISK: 2487 case CCISS_GETLUNINFO: 2488 return hpsa_ioctl(dev, cmd, arg); 2489 2490 case CCISS_PASSTHRU32: 2491 return hpsa_ioctl32_passthru(dev, cmd, arg); 2492 case CCISS_BIG_PASSTHRU32: 2493 return hpsa_ioctl32_big_passthru(dev, cmd, arg); 2494 2495 default: 2496 return -ENOIOCTLCMD; 2497 } 2498 } 2499 #endif 2500 2501 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) 2502 { 2503 struct hpsa_pci_info pciinfo; 2504 2505 if (!argp) 2506 return -EINVAL; 2507 pciinfo.domain = pci_domain_nr(h->pdev->bus); 2508 pciinfo.bus = h->pdev->bus->number; 2509 pciinfo.dev_fn = h->pdev->devfn; 2510 pciinfo.board_id = h->board_id; 2511 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo))) 2512 return -EFAULT; 2513 return 0; 2514 } 2515 2516 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) 2517 { 2518 DriverVer_type DriverVer; 2519 unsigned char vmaj, vmin, vsubmin; 2520 int rc; 2521 2522 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu", 2523 &vmaj, &vmin, &vsubmin); 2524 if (rc != 3) { 2525 dev_info(&h->pdev->dev, "driver version string '%s' " 2526 "unrecognized.", HPSA_DRIVER_VERSION); 2527 vmaj = 0; 2528 vmin = 0; 2529 vsubmin = 0; 2530 } 2531 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin; 2532 if (!argp) 2533 return -EINVAL; 2534 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type))) 2535 return -EFAULT; 2536 return 0; 2537 } 2538 2539 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) 2540 { 2541 IOCTL_Command_struct iocommand; 2542 struct CommandList *c; 2543 char *buff = NULL; 2544 union u64bit temp64; 2545 2546 if (!argp) 2547 return -EINVAL; 2548 if (!capable(CAP_SYS_RAWIO)) 2549 return -EPERM; 2550 if (copy_from_user(&iocommand, argp, sizeof(iocommand))) 2551 return -EFAULT; 2552 if ((iocommand.buf_size < 1) && 2553 (iocommand.Request.Type.Direction != XFER_NONE)) { 2554 return -EINVAL; 2555 } 2556 if (iocommand.buf_size > 0) { 2557 buff = kmalloc(iocommand.buf_size, GFP_KERNEL); 2558 if (buff == NULL) 2559 return -EFAULT; 2560 if (iocommand.Request.Type.Direction == XFER_WRITE) { 2561 /* Copy the data into the buffer we created */ 2562 if (copy_from_user(buff, iocommand.buf, 2563 iocommand.buf_size)) { 2564 kfree(buff); 2565 return -EFAULT; 2566 } 2567 } else { 2568 memset(buff, 0, iocommand.buf_size); 2569 } 2570 } 2571 c = cmd_special_alloc(h); 2572 if (c == NULL) { 2573 kfree(buff); 2574 return -ENOMEM; 2575 } 2576 /* Fill in the command type */ 2577 c->cmd_type = CMD_IOCTL_PEND; 2578 /* Fill in Command Header */ 2579 c->Header.ReplyQueue = 0; /* unused in simple mode */ 2580 if (iocommand.buf_size > 0) { 
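/* The whole user transfer is staged through the single bounce buffer
 * allocated above, so exactly one SG descriptor is needed.
 */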
/* buffer to fill */ 2581 c->Header.SGList = 1; 2582 c->Header.SGTotal = 1; 2583 } else { /* no buffers to fill */ 2584 c->Header.SGList = 0; 2585 c->Header.SGTotal = 0; 2586 } 2587 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); 2588 /* use the kernel address the cmd block for tag */ 2589 c->Header.Tag.lower = c->busaddr; 2590 2591 /* Fill in Request block */ 2592 memcpy(&c->Request, &iocommand.Request, 2593 sizeof(c->Request)); 2594 2595 /* Fill in the scatter gather information */ 2596 if (iocommand.buf_size > 0) { 2597 temp64.val = pci_map_single(h->pdev, buff, 2598 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); 2599 c->SG[0].Addr.lower = temp64.val32.lower; 2600 c->SG[0].Addr.upper = temp64.val32.upper; 2601 c->SG[0].Len = iocommand.buf_size; 2602 c->SG[0].Ext = 0; /* we are not chaining*/ 2603 } 2604 hpsa_scsi_do_simple_cmd_core(h, c); 2605 if (iocommand.buf_size > 0) 2606 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); 2607 check_ioctl_unit_attention(h, c); 2608 2609 /* Copy the error information out */ 2610 memcpy(&iocommand.error_info, c->err_info, 2611 sizeof(iocommand.error_info)); 2612 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) { 2613 kfree(buff); 2614 cmd_special_free(h, c); 2615 return -EFAULT; 2616 } 2617 if (iocommand.Request.Type.Direction == XFER_READ && 2618 iocommand.buf_size > 0) { 2619 /* Copy the data out of the buffer we created */ 2620 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { 2621 kfree(buff); 2622 cmd_special_free(h, c); 2623 return -EFAULT; 2624 } 2625 } 2626 kfree(buff); 2627 cmd_special_free(h, c); 2628 return 0; 2629 } 2630 2631 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) 2632 { 2633 BIG_IOCTL_Command_struct *ioc; 2634 struct CommandList *c; 2635 unsigned char **buff = NULL; 2636 int *buff_size = NULL; 2637 union u64bit temp64; 2638 BYTE sg_used = 0; 2639 int status = 0; 2640 int i; 2641 u32 left; 2642 u32 sz; 2643 BYTE __user *data_ptr; 2644 2645 if (!argp) 2646 return -EINVAL; 2647 if (!capable(CAP_SYS_RAWIO)) 2648 return -EPERM; 2649 ioc = (BIG_IOCTL_Command_struct *) 2650 kmalloc(sizeof(*ioc), GFP_KERNEL); 2651 if (!ioc) { 2652 status = -ENOMEM; 2653 goto cleanup1; 2654 } 2655 if (copy_from_user(ioc, argp, sizeof(*ioc))) { 2656 status = -EFAULT; 2657 goto cleanup1; 2658 } 2659 if ((ioc->buf_size < 1) && 2660 (ioc->Request.Type.Direction != XFER_NONE)) { 2661 status = -EINVAL; 2662 goto cleanup1; 2663 } 2664 /* Check kmalloc limits using all SGs */ 2665 if (ioc->malloc_size > MAX_KMALLOC_SIZE) { 2666 status = -EINVAL; 2667 goto cleanup1; 2668 } 2669 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) { 2670 status = -EINVAL; 2671 goto cleanup1; 2672 } 2673 buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL); 2674 if (!buff) { 2675 status = -ENOMEM; 2676 goto cleanup1; 2677 } 2678 buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL); 2679 if (!buff_size) { 2680 status = -ENOMEM; 2681 goto cleanup1; 2682 } 2683 left = ioc->buf_size; 2684 data_ptr = ioc->buf; 2685 while (left) { 2686 sz = (left > ioc->malloc_size) ? 
ioc->malloc_size : left; 2687 buff_size[sg_used] = sz; 2688 buff[sg_used] = kmalloc(sz, GFP_KERNEL); 2689 if (buff[sg_used] == NULL) { 2690 status = -ENOMEM; 2691 goto cleanup1; 2692 } 2693 if (ioc->Request.Type.Direction == XFER_WRITE) { 2694 if (copy_from_user(buff[sg_used], data_ptr, sz)) { 2695 status = -ENOMEM; 2696 goto cleanup1; 2697 } 2698 } else 2699 memset(buff[sg_used], 0, sz); 2700 left -= sz; 2701 data_ptr += sz; 2702 sg_used++; 2703 } 2704 c = cmd_special_alloc(h); 2705 if (c == NULL) { 2706 status = -ENOMEM; 2707 goto cleanup1; 2708 } 2709 c->cmd_type = CMD_IOCTL_PEND; 2710 c->Header.ReplyQueue = 0; 2711 c->Header.SGList = c->Header.SGTotal = sg_used; 2712 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); 2713 c->Header.Tag.lower = c->busaddr; 2714 memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); 2715 if (ioc->buf_size > 0) { 2716 int i; 2717 for (i = 0; i < sg_used; i++) { 2718 temp64.val = pci_map_single(h->pdev, buff[i], 2719 buff_size[i], PCI_DMA_BIDIRECTIONAL); 2720 c->SG[i].Addr.lower = temp64.val32.lower; 2721 c->SG[i].Addr.upper = temp64.val32.upper; 2722 c->SG[i].Len = buff_size[i]; 2723 /* we are not chaining */ 2724 c->SG[i].Ext = 0; 2725 } 2726 } 2727 hpsa_scsi_do_simple_cmd_core(h, c); 2728 if (sg_used) 2729 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); 2730 check_ioctl_unit_attention(h, c); 2731 /* Copy the error information out */ 2732 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); 2733 if (copy_to_user(argp, ioc, sizeof(*ioc))) { 2734 cmd_special_free(h, c); 2735 status = -EFAULT; 2736 goto cleanup1; 2737 } 2738 if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) { 2739 /* Copy the data out of the buffer we created */ 2740 BYTE __user *ptr = ioc->buf; 2741 for (i = 0; i < sg_used; i++) { 2742 if (copy_to_user(ptr, buff[i], buff_size[i])) { 2743 cmd_special_free(h, c); 2744 status = -EFAULT; 2745 goto cleanup1; 2746 } 2747 ptr += buff_size[i]; 2748 } 2749 } 2750 cmd_special_free(h, c); 2751 status = 0; 2752 cleanup1: 2753 if (buff) { 2754 for (i = 0; i < sg_used; i++) 2755 kfree(buff[i]); 2756 kfree(buff); 2757 } 2758 kfree(buff_size); 2759 kfree(ioc); 2760 return status; 2761 } 2762 2763 static void check_ioctl_unit_attention(struct ctlr_info *h, 2764 struct CommandList *c) 2765 { 2766 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 2767 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) 2768 (void) check_for_unit_attention(h, c); 2769 } 2770 /* 2771 * ioctl 2772 */ 2773 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg) 2774 { 2775 struct ctlr_info *h; 2776 void __user *argp = (void __user *)arg; 2777 2778 h = sdev_to_hba(dev); 2779 2780 switch (cmd) { 2781 case CCISS_DEREGDISK: 2782 case CCISS_REGNEWDISK: 2783 case CCISS_REGNEWD: 2784 hpsa_scan_start(h->scsi_host); 2785 return 0; 2786 case CCISS_GETPCIINFO: 2787 return hpsa_getpciinfo_ioctl(h, argp); 2788 case CCISS_GETDRIVVER: 2789 return hpsa_getdrivver_ioctl(h, argp); 2790 case CCISS_PASSTHRU: 2791 return hpsa_passthru_ioctl(h, argp); 2792 case CCISS_BIG_PASSTHRU: 2793 return hpsa_big_passthru_ioctl(h, argp); 2794 default: 2795 return -ENOTTY; 2796 } 2797 } 2798 2799 static int __devinit hpsa_send_host_reset(struct ctlr_info *h, 2800 unsigned char *scsi3addr, u8 reset_type) 2801 { 2802 struct CommandList *c; 2803 2804 c = cmd_alloc(h); 2805 if (!c) 2806 return -ENOMEM; 2807 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, 2808 RAID_CTLR_LUNID, TYPE_MSG); 2809 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to 
target reset */
2810 c->waiting = NULL;
2811 enqueue_cmd_and_start_io(h, c);
2812 /* Don't wait for completion, the reset won't complete. Don't free
2813 * the command either. This is the last command we will send before
2814 * re-initializing everything, so it doesn't matter and won't leak.
2815 */
2816 return 0;
2817 }
2818
2819 static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
2820 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
2821 int cmd_type)
2822 {
2823 int pci_dir = XFER_NONE;
2824
2825 c->cmd_type = CMD_IOCTL_PEND;
2826 c->Header.ReplyQueue = 0;
2827 if (buff != NULL && size > 0) {
2828 c->Header.SGList = 1;
2829 c->Header.SGTotal = 1;
2830 } else {
2831 c->Header.SGList = 0;
2832 c->Header.SGTotal = 0;
2833 }
2834 c->Header.Tag.lower = c->busaddr;
2835 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
2836
2837 c->Request.Type.Type = cmd_type;
2838 if (cmd_type == TYPE_CMD) {
2839 switch (cmd) {
2840 case HPSA_INQUIRY:
2841 /* are we trying to read a vital product page? */
2842 if (page_code != 0) {
2843 c->Request.CDB[1] = 0x01;
2844 c->Request.CDB[2] = page_code;
2845 }
2846 c->Request.CDBLen = 6;
2847 c->Request.Type.Attribute = ATTR_SIMPLE;
2848 c->Request.Type.Direction = XFER_READ;
2849 c->Request.Timeout = 0;
2850 c->Request.CDB[0] = HPSA_INQUIRY;
2851 c->Request.CDB[4] = size & 0xFF;
2852 break;
2853 case HPSA_REPORT_LOG:
2854 case HPSA_REPORT_PHYS:
2855 /* Talking to the controller, so it's a physical command:
2856 mode = 00, target = 0. Nothing to write.
2857 */
2858 c->Request.CDBLen = 12;
2859 c->Request.Type.Attribute = ATTR_SIMPLE;
2860 c->Request.Type.Direction = XFER_READ;
2861 c->Request.Timeout = 0;
2862 c->Request.CDB[0] = cmd;
2863 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
2864 c->Request.CDB[7] = (size >> 16) & 0xFF;
2865 c->Request.CDB[8] = (size >> 8) & 0xFF;
2866 c->Request.CDB[9] = size & 0xFF;
2867 break;
2868 case HPSA_CACHE_FLUSH:
2869 c->Request.CDBLen = 12;
2870 c->Request.Type.Attribute = ATTR_SIMPLE;
2871 c->Request.Type.Direction = XFER_WRITE;
2872 c->Request.Timeout = 0;
2873 c->Request.CDB[0] = BMIC_WRITE;
2874 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
2875 break;
2876 case TEST_UNIT_READY:
2877 c->Request.CDBLen = 6;
2878 c->Request.Type.Attribute = ATTR_SIMPLE;
2879 c->Request.Type.Direction = XFER_NONE;
2880 c->Request.Timeout = 0;
2881 break;
2882 default:
2883 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
2884 BUG();
2885 return;
2886 }
2887 } else if (cmd_type == TYPE_MSG) {
2888 switch (cmd) {
2889
2890 case HPSA_DEVICE_RESET_MSG:
2891 c->Request.CDBLen = 16;
2892 c->Request.Type.Type = 1; /* It is a MSG, not a CMD */
2893 c->Request.Type.Attribute = ATTR_SIMPLE;
2894 c->Request.Type.Direction = XFER_NONE;
2895 c->Request.Timeout = 0; /* Don't time out */
2896 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
2897 c->Request.CDB[0] = cmd;
2898 c->Request.CDB[1] = 0x03; /* Reset target above */
2899 /* If bytes 4-7 are zero, it means reset the */
2900 /* LunID device */
2901 c->Request.CDB[4] = 0x00;
2902 c->Request.CDB[5] = 0x00;
2903 c->Request.CDB[6] = 0x00;
2904 c->Request.CDB[7] = 0x00;
2905 break;
2906
2907 default:
2908 dev_warn(&h->pdev->dev, "unknown message type %d\n",
2909 cmd);
2910 BUG();
2911 }
2912 } else {
2913 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
2914 BUG();
2915 }
2916
2917 switch (c->Request.Type.Direction) {
2918 case XFER_READ:
2919 pci_dir = PCI_DMA_FROMDEVICE;
2920 break;
2921 case XFER_WRITE:
2922 pci_dir = PCI_DMA_TODEVICE;
2923 break;
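/* Remaining directions: XFER_NONE maps to PCI_DMA_NONE, and anything
 * else conservatively becomes PCI_DMA_BIDIRECTIONAL before the
 * hpsa_map_one() call at the end of fill_cmd().
 */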
2924 case XFER_NONE: 2925 pci_dir = PCI_DMA_NONE; 2926 break; 2927 default: 2928 pci_dir = PCI_DMA_BIDIRECTIONAL; 2929 } 2930 2931 hpsa_map_one(h->pdev, c, buff, size, pci_dir); 2932 2933 return; 2934 } 2935 2936 /* 2937 * Map (physical) PCI mem into (virtual) kernel space 2938 */ 2939 static void __iomem *remap_pci_mem(ulong base, ulong size) 2940 { 2941 ulong page_base = ((ulong) base) & PAGE_MASK; 2942 ulong page_offs = ((ulong) base) - page_base; 2943 void __iomem *page_remapped = ioremap(page_base, page_offs + size); 2944 2945 return page_remapped ? (page_remapped + page_offs) : NULL; 2946 } 2947 2948 /* Takes cmds off the submission queue and sends them to the hardware, 2949 * then puts them on the queue of cmds waiting for completion. 2950 */ 2951 static void start_io(struct ctlr_info *h) 2952 { 2953 struct CommandList *c; 2954 2955 while (!list_empty(&h->reqQ)) { 2956 c = list_entry(h->reqQ.next, struct CommandList, list); 2957 /* can't do anything if fifo is full */ 2958 if ((h->access.fifo_full(h))) { 2959 dev_warn(&h->pdev->dev, "fifo full\n"); 2960 break; 2961 } 2962 2963 /* Get the first entry from the Request Q */ 2964 removeQ(c); 2965 h->Qdepth--; 2966 2967 /* Tell the controller execute command */ 2968 h->access.submit_command(h, c); 2969 2970 /* Put job onto the completed Q */ 2971 addQ(&h->cmpQ, c); 2972 } 2973 } 2974 2975 static inline unsigned long get_next_completion(struct ctlr_info *h) 2976 { 2977 return h->access.command_completed(h); 2978 } 2979 2980 static inline bool interrupt_pending(struct ctlr_info *h) 2981 { 2982 return h->access.intr_pending(h); 2983 } 2984 2985 static inline long interrupt_not_for_us(struct ctlr_info *h) 2986 { 2987 return (h->access.intr_pending(h) == 0) || 2988 (h->interrupts_enabled == 0); 2989 } 2990 2991 static inline int bad_tag(struct ctlr_info *h, u32 tag_index, 2992 u32 raw_tag) 2993 { 2994 if (unlikely(tag_index >= h->nr_cmds)) { 2995 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); 2996 return 1; 2997 } 2998 return 0; 2999 } 3000 3001 static inline void finish_cmd(struct CommandList *c, u32 raw_tag) 3002 { 3003 removeQ(c); 3004 if (likely(c->cmd_type == CMD_SCSI)) 3005 complete_scsi_command(c); 3006 else if (c->cmd_type == CMD_IOCTL_PEND) 3007 complete(c->waiting); 3008 } 3009 3010 static inline u32 hpsa_tag_contains_index(u32 tag) 3011 { 3012 return tag & DIRECT_LOOKUP_BIT; 3013 } 3014 3015 static inline u32 hpsa_tag_to_index(u32 tag) 3016 { 3017 return tag >> DIRECT_LOOKUP_SHIFT; 3018 } 3019 3020 3021 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag) 3022 { 3023 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1) 3024 #define HPSA_SIMPLE_ERROR_BITS 0x03 3025 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) 3026 return tag & ~HPSA_SIMPLE_ERROR_BITS; 3027 return tag & ~HPSA_PERF_ERROR_BITS; 3028 } 3029 3030 /* process completion of an indexed ("direct lookup") command */ 3031 static inline u32 process_indexed_cmd(struct ctlr_info *h, 3032 u32 raw_tag) 3033 { 3034 u32 tag_index; 3035 struct CommandList *c; 3036 3037 tag_index = hpsa_tag_to_index(raw_tag); 3038 if (bad_tag(h, tag_index, raw_tag)) 3039 return next_command(h); 3040 c = h->cmd_pool + tag_index; 3041 finish_cmd(c, raw_tag); 3042 return next_command(h); 3043 } 3044 3045 /* process completion of a non-indexed command */ 3046 static inline u32 process_nonindexed_cmd(struct ctlr_info *h, 3047 u32 raw_tag) 3048 { 3049 u32 tag; 3050 struct CommandList *c = NULL; 3051 3052 tag = hpsa_tag_discard_error_bits(h, 
raw_tag); 3053 list_for_each_entry(c, &h->cmpQ, list) { 3054 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) { 3055 finish_cmd(c, raw_tag); 3056 return next_command(h); 3057 } 3058 } 3059 bad_tag(h, h->nr_cmds + 1, raw_tag); 3060 return next_command(h); 3061 } 3062 3063 /* Some controllers, like p400, will give us one interrupt 3064 * after a soft reset, even if we turned interrupts off. 3065 * Only need to check for this in the hpsa_xxx_discard_completions 3066 * functions. 3067 */ 3068 static int ignore_bogus_interrupt(struct ctlr_info *h) 3069 { 3070 if (likely(!reset_devices)) 3071 return 0; 3072 3073 if (likely(h->interrupts_enabled)) 3074 return 0; 3075 3076 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " 3077 "(known firmware bug.) Ignoring.\n"); 3078 3079 return 1; 3080 } 3081 3082 static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id) 3083 { 3084 struct ctlr_info *h = dev_id; 3085 unsigned long flags; 3086 u32 raw_tag; 3087 3088 if (ignore_bogus_interrupt(h)) 3089 return IRQ_NONE; 3090 3091 if (interrupt_not_for_us(h)) 3092 return IRQ_NONE; 3093 spin_lock_irqsave(&h->lock, flags); 3094 while (interrupt_pending(h)) { 3095 raw_tag = get_next_completion(h); 3096 while (raw_tag != FIFO_EMPTY) 3097 raw_tag = next_command(h); 3098 } 3099 spin_unlock_irqrestore(&h->lock, flags); 3100 return IRQ_HANDLED; 3101 } 3102 3103 static irqreturn_t hpsa_msix_discard_completions(int irq, void *dev_id) 3104 { 3105 struct ctlr_info *h = dev_id; 3106 unsigned long flags; 3107 u32 raw_tag; 3108 3109 if (ignore_bogus_interrupt(h)) 3110 return IRQ_NONE; 3111 3112 spin_lock_irqsave(&h->lock, flags); 3113 raw_tag = get_next_completion(h); 3114 while (raw_tag != FIFO_EMPTY) 3115 raw_tag = next_command(h); 3116 spin_unlock_irqrestore(&h->lock, flags); 3117 return IRQ_HANDLED; 3118 } 3119 3120 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id) 3121 { 3122 struct ctlr_info *h = dev_id; 3123 unsigned long flags; 3124 u32 raw_tag; 3125 3126 if (interrupt_not_for_us(h)) 3127 return IRQ_NONE; 3128 spin_lock_irqsave(&h->lock, flags); 3129 while (interrupt_pending(h)) { 3130 raw_tag = get_next_completion(h); 3131 while (raw_tag != FIFO_EMPTY) { 3132 if (hpsa_tag_contains_index(raw_tag)) 3133 raw_tag = process_indexed_cmd(h, raw_tag); 3134 else 3135 raw_tag = process_nonindexed_cmd(h, raw_tag); 3136 } 3137 } 3138 spin_unlock_irqrestore(&h->lock, flags); 3139 return IRQ_HANDLED; 3140 } 3141 3142 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id) 3143 { 3144 struct ctlr_info *h = dev_id; 3145 unsigned long flags; 3146 u32 raw_tag; 3147 3148 spin_lock_irqsave(&h->lock, flags); 3149 raw_tag = get_next_completion(h); 3150 while (raw_tag != FIFO_EMPTY) { 3151 if (hpsa_tag_contains_index(raw_tag)) 3152 raw_tag = process_indexed_cmd(h, raw_tag); 3153 else 3154 raw_tag = process_nonindexed_cmd(h, raw_tag); 3155 } 3156 spin_unlock_irqrestore(&h->lock, flags); 3157 return IRQ_HANDLED; 3158 } 3159 3160 /* Send a message CDB to the firmware. Careful, this only works 3161 * in simple mode, not performant mode due to the tag lookup. 3162 * We only ever use this immediately after a controller reset. 
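* The command is built in coherent DMA memory below 4GiB, its bus
* address is written to the request FIFO (SA5_REQUEST_PORT_OFFSET),
* and completion is detected by polling the reply FIFO for a tag
* matching that address. The simplest user is the hpsa_noop() macro
* further down, which expands to hpsa_message(p, 3, 0).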
3163 */ 3164 static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, 3165 unsigned char type) 3166 { 3167 struct Command { 3168 struct CommandListHeader CommandHeader; 3169 struct RequestBlock Request; 3170 struct ErrDescriptor ErrorDescriptor; 3171 }; 3172 struct Command *cmd; 3173 static const size_t cmd_sz = sizeof(*cmd) + 3174 sizeof(cmd->ErrorDescriptor); 3175 dma_addr_t paddr64; 3176 uint32_t paddr32, tag; 3177 void __iomem *vaddr; 3178 int i, err; 3179 3180 vaddr = pci_ioremap_bar(pdev, 0); 3181 if (vaddr == NULL) 3182 return -ENOMEM; 3183 3184 /* The Inbound Post Queue only accepts 32-bit physical addresses for the 3185 * CCISS commands, so they must be allocated from the lower 4GiB of 3186 * memory. 3187 */ 3188 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3189 if (err) { 3190 iounmap(vaddr); 3191 return -ENOMEM; 3192 } 3193 3194 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); 3195 if (cmd == NULL) { 3196 iounmap(vaddr); 3197 return -ENOMEM; 3198 } 3199 3200 /* This must fit, because of the 32-bit consistent DMA mask. Also, 3201 * although there's no guarantee, we assume that the address is at 3202 * least 4-byte aligned (most likely, it's page-aligned). 3203 */ 3204 paddr32 = paddr64; 3205 3206 cmd->CommandHeader.ReplyQueue = 0; 3207 cmd->CommandHeader.SGList = 0; 3208 cmd->CommandHeader.SGTotal = 0; 3209 cmd->CommandHeader.Tag.lower = paddr32; 3210 cmd->CommandHeader.Tag.upper = 0; 3211 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); 3212 3213 cmd->Request.CDBLen = 16; 3214 cmd->Request.Type.Type = TYPE_MSG; 3215 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE; 3216 cmd->Request.Type.Direction = XFER_NONE; 3217 cmd->Request.Timeout = 0; /* Don't time out */ 3218 cmd->Request.CDB[0] = opcode; 3219 cmd->Request.CDB[1] = type; 3220 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ 3221 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd); 3222 cmd->ErrorDescriptor.Addr.upper = 0; 3223 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo); 3224 3225 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET); 3226 3227 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { 3228 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); 3229 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32) 3230 break; 3231 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); 3232 } 3233 3234 iounmap(vaddr); 3235 3236 /* we leak the DMA buffer here ... no choice since the controller could 3237 * still complete the command. 3238 */ 3239 if (i == HPSA_MSG_SEND_RETRY_LIMIT) { 3240 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n", 3241 opcode, type); 3242 return -ETIMEDOUT; 3243 } 3244 3245 pci_free_consistent(pdev, cmd_sz, cmd, paddr64); 3246 3247 if (tag & HPSA_ERROR_BIT) { 3248 dev_err(&pdev->dev, "controller message %02x:%02x failed\n", 3249 opcode, type); 3250 return -EIO; 3251 } 3252 3253 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", 3254 opcode, type); 3255 return 0; 3256 } 3257 3258 #define hpsa_noop(p) hpsa_message(p, 3, 0) 3259 3260 static int hpsa_controller_hard_reset(struct pci_dev *pdev, 3261 void * __iomem vaddr, u32 use_doorbell) 3262 { 3263 u16 pmcsr; 3264 int pos; 3265 3266 if (use_doorbell) { 3267 /* For everything after the P600, the PCI power state method 3268 * of resetting the controller doesn't work, so we have this 3269 * other way using the doorbell register. 
3270 */ 3271 dev_info(&pdev->dev, "using doorbell to reset controller\n"); 3272 writel(use_doorbell, vaddr + SA5_DOORBELL); 3273 } else { /* Try to do it the PCI power state way */ 3274 3275 /* Quoting from the Open CISS Specification: "The Power 3276 * Management Control/Status Register (CSR) controls the power 3277 * state of the device. The normal operating state is D0, 3278 * CSR=00h. The software off state is D3, CSR=03h. To reset 3279 * the controller, place the interface device in D3 then to D0, 3280 * this causes a secondary PCI reset which will reset the 3281 * controller." */ 3282 3283 pos = pci_find_capability(pdev, PCI_CAP_ID_PM); 3284 if (pos == 0) { 3285 dev_err(&pdev->dev, 3286 "hpsa_reset_controller: " 3287 "PCI PM not supported\n"); 3288 return -ENODEV; 3289 } 3290 dev_info(&pdev->dev, "using PCI PM to reset controller\n"); 3291 /* enter the D3hot power management state */ 3292 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); 3293 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 3294 pmcsr |= PCI_D3hot; 3295 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 3296 3297 msleep(500); 3298 3299 /* enter the D0 power management state */ 3300 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 3301 pmcsr |= PCI_D0; 3302 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 3303 } 3304 return 0; 3305 } 3306 3307 static __devinit void init_driver_version(char *driver_version, int len) 3308 { 3309 memset(driver_version, 0, len); 3310 strncpy(driver_version, "hpsa " HPSA_DRIVER_VERSION, len - 1); 3311 } 3312 3313 static __devinit int write_driver_ver_to_cfgtable( 3314 struct CfgTable __iomem *cfgtable) 3315 { 3316 char *driver_version; 3317 int i, size = sizeof(cfgtable->driver_version); 3318 3319 driver_version = kmalloc(size, GFP_KERNEL); 3320 if (!driver_version) 3321 return -ENOMEM; 3322 3323 init_driver_version(driver_version, size); 3324 for (i = 0; i < size; i++) 3325 writeb(driver_version[i], &cfgtable->driver_version[i]); 3326 kfree(driver_version); 3327 return 0; 3328 } 3329 3330 static __devinit void read_driver_ver_from_cfgtable( 3331 struct CfgTable __iomem *cfgtable, unsigned char *driver_ver) 3332 { 3333 int i; 3334 3335 for (i = 0; i < sizeof(cfgtable->driver_version); i++) 3336 driver_ver[i] = readb(&cfgtable->driver_version[i]); 3337 } 3338 3339 static __devinit int controller_reset_failed( 3340 struct CfgTable __iomem *cfgtable) 3341 { 3342 3343 char *driver_ver, *old_driver_ver; 3344 int rc, size = sizeof(cfgtable->driver_version); 3345 3346 old_driver_ver = kmalloc(2 * size, GFP_KERNEL); 3347 if (!old_driver_ver) 3348 return -ENOMEM; 3349 driver_ver = old_driver_ver + size; 3350 3351 /* After a reset, the 32 bytes of "driver version" in the cfgtable 3352 * should have been changed, otherwise we know the reset failed. 3353 */ 3354 init_driver_version(old_driver_ver, size); 3355 read_driver_ver_from_cfgtable(cfgtable, driver_ver); 3356 rc = !memcmp(driver_ver, old_driver_ver, size); 3357 kfree(old_driver_ver); 3358 return rc; 3359 } 3360 /* This does a hard reset of the controller using PCI power management 3361 * states or the using the doorbell register. 
3362 */ 3363 static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) 3364 { 3365 u64 cfg_offset; 3366 u32 cfg_base_addr; 3367 u64 cfg_base_addr_index; 3368 void __iomem *vaddr; 3369 unsigned long paddr; 3370 u32 misc_fw_support; 3371 int rc; 3372 struct CfgTable __iomem *cfgtable; 3373 u32 use_doorbell; 3374 u32 board_id; 3375 u16 command_register; 3376 3377 /* For controllers as old as the P600, this is very nearly 3378 * the same thing as 3379 * 3380 * pci_save_state(pci_dev); 3381 * pci_set_power_state(pci_dev, PCI_D3hot); 3382 * pci_set_power_state(pci_dev, PCI_D0); 3383 * pci_restore_state(pci_dev); 3384 * 3385 * For controllers newer than the P600, the pci power state 3386 * method of resetting doesn't work so we have another way 3387 * using the doorbell register. 3388 */ 3389 3390 rc = hpsa_lookup_board_id(pdev, &board_id); 3391 if (rc < 0 || !ctlr_is_resettable(board_id)) { 3392 dev_warn(&pdev->dev, "Not resetting device.\n"); 3393 return -ENODEV; 3394 } 3395 3396 /* if controller is soft- but not hard resettable... */ 3397 if (!ctlr_is_hard_resettable(board_id)) 3398 return -ENOTSUPP; /* try soft reset later. */ 3399 3400 /* Save the PCI command register */ 3401 pci_read_config_word(pdev, 4, &command_register); 3402 /* Turn the board off. This is so that later pci_restore_state() 3403 * won't turn the board on before the rest of config space is ready. 3404 */ 3405 pci_disable_device(pdev); 3406 pci_save_state(pdev); 3407 3408 /* find the first memory BAR, so we can find the cfg table */ 3409 rc = hpsa_pci_find_memory_BAR(pdev, &paddr); 3410 if (rc) 3411 return rc; 3412 vaddr = remap_pci_mem(paddr, 0x250); 3413 if (!vaddr) 3414 return -ENOMEM; 3415 3416 /* find cfgtable in order to check if reset via doorbell is supported */ 3417 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, 3418 &cfg_base_addr_index, &cfg_offset); 3419 if (rc) 3420 goto unmap_vaddr; 3421 cfgtable = remap_pci_mem(pci_resource_start(pdev, 3422 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable)); 3423 if (!cfgtable) { 3424 rc = -ENOMEM; 3425 goto unmap_vaddr; 3426 } 3427 rc = write_driver_ver_to_cfgtable(cfgtable); 3428 if (rc) 3429 goto unmap_vaddr; 3430 3431 /* If reset via doorbell register is supported, use that. 3432 * There are two such methods. Favor the newest method. 3433 */ 3434 misc_fw_support = readl(&cfgtable->misc_fw_support); 3435 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2; 3436 if (use_doorbell) { 3437 use_doorbell = DOORBELL_CTLR_RESET2; 3438 } else { 3439 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; 3440 if (use_doorbell) { 3441 dev_warn(&pdev->dev, "Soft reset not supported. " 3442 "Firmware update is required.\n"); 3443 rc = -ENOTSUPP; /* try soft reset */ 3444 goto unmap_cfgtable; 3445 } 3446 } 3447 3448 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); 3449 if (rc) 3450 goto unmap_cfgtable; 3451 3452 pci_restore_state(pdev); 3453 rc = pci_enable_device(pdev); 3454 if (rc) { 3455 dev_warn(&pdev->dev, "failed to enable device.\n"); 3456 goto unmap_cfgtable; 3457 } 3458 pci_write_config_word(pdev, 4, command_register); 3459 3460 /* Some devices (notably the HP Smart Array 5i Controller) 3461 need a little pause here */ 3462 msleep(HPSA_POST_RESET_PAUSE_MSECS); 3463 3464 /* Wait for board to become not ready, then ready. */ 3465 dev_info(&pdev->dev, "Waiting for board to reset.\n"); 3466 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY); 3467 if (rc) { 3468 dev_warn(&pdev->dev, 3469 "failed waiting for board to reset." 
3470 " Will try soft reset.\n");
3471 rc = -ENOTSUPP; /* Not expected, but try soft reset later */
3472 goto unmap_cfgtable;
3473 }
3474 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
3475 if (rc) {
3476 dev_warn(&pdev->dev,
3477 "failed waiting for board to become ready "
3478 "after hard reset\n");
3479 goto unmap_cfgtable;
3480 }
3481
3482 rc = controller_reset_failed(cfgtable);
3483 if (rc < 0)
3484 goto unmap_cfgtable;
3485 if (rc) {
3486 dev_warn(&pdev->dev, "Unable to successfully reset "
3487 "controller. Will try soft reset.\n");
3488 rc = -ENOTSUPP;
3489 } else {
3490 dev_info(&pdev->dev, "board ready after hard reset.\n");
3491 }
3492
3493 unmap_cfgtable:
3494 iounmap(cfgtable);
3495
3496 unmap_vaddr:
3497 iounmap(vaddr);
3498 return rc;
3499 }
3500
3501 /*
3502 * We cannot read the structure directly; for portability we must use
3503 * the io functions.
3504 * This is for debug only.
3505 */
3506 static void print_cfg_table(struct device *dev, struct CfgTable *tb)
3507 {
3508 #ifdef HPSA_DEBUG
3509 int i;
3510 char temp_name[17];
3511
3512 dev_info(dev, "Controller Configuration information\n");
3513 dev_info(dev, "------------------------------------\n");
3514 for (i = 0; i < 4; i++)
3515 temp_name[i] = readb(&(tb->Signature[i]));
3516 temp_name[4] = '\0';
3517 dev_info(dev, " Signature = %s\n", temp_name);
3518 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
3519 dev_info(dev, " Transport methods supported = 0x%x\n",
3520 readl(&(tb->TransportSupport)));
3521 dev_info(dev, " Transport methods active = 0x%x\n",
3522 readl(&(tb->TransportActive)));
3523 dev_info(dev, " Requested transport Method = 0x%x\n",
3524 readl(&(tb->HostWrite.TransportRequest)));
3525 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
3526 readl(&(tb->HostWrite.CoalIntDelay)));
3527 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
3528 readl(&(tb->HostWrite.CoalIntCount)));
3529 dev_info(dev, " Max outstanding commands = %d\n",
3530 readl(&(tb->CmdsOutMax)));
3531 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
3532 for (i = 0; i < 16; i++)
3533 temp_name[i] = readb(&(tb->ServerName[i]));
3534 temp_name[16] = '\0';
3535 dev_info(dev, " Server Name = %s\n", temp_name);
3536 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
3537 readl(&(tb->HeartBeat)));
3538 #endif /* HPSA_DEBUG */
3539 }
3540
3541 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3542 {
3543 int i, offset, mem_type, bar_type;
3544
3545 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
3546 return 0;
3547 offset = 0;
3548 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3549 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
3550 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
3551 offset += 4;
3552 else {
3553 mem_type = pci_resource_flags(pdev, i) &
3554 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
3555 switch (mem_type) {
3556 case PCI_BASE_ADDRESS_MEM_TYPE_32:
3557 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
3558 offset += 4; /* 32 bit */
3559 break;
3560 case PCI_BASE_ADDRESS_MEM_TYPE_64:
3561 offset += 8;
3562 break;
3563 default: /* reserved in PCI 2.2 */
3564 dev_warn(&pdev->dev,
3565 "base address is invalid\n");
3566 return -1;
3567 break;
3568 }
3569 }
3570 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
3571 return i + 1;
3572 }
3573 return -1;
3574 }
3575
3576 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
3577 * controllers that are capable. If not, we use IO-APIC mode.
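* The fallback order below is MSI-X (four vectors) first, then MSI,
* then the plain INTx/IO-APIC path; boards on the known-broken list
* (e.g. board_id 0x40700E11) skip message-signaled interrupts
* entirely.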
3578 */ 3579 3580 static void __devinit hpsa_interrupt_mode(struct ctlr_info *h) 3581 { 3582 #ifdef CONFIG_PCI_MSI 3583 int err; 3584 struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1}, 3585 {0, 2}, {0, 3} 3586 }; 3587 3588 /* Some boards advertise MSI but don't really support it */ 3589 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || 3590 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) 3591 goto default_int_mode; 3592 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { 3593 dev_info(&h->pdev->dev, "MSIX\n"); 3594 err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4); 3595 if (!err) { 3596 h->intr[0] = hpsa_msix_entries[0].vector; 3597 h->intr[1] = hpsa_msix_entries[1].vector; 3598 h->intr[2] = hpsa_msix_entries[2].vector; 3599 h->intr[3] = hpsa_msix_entries[3].vector; 3600 h->msix_vector = 1; 3601 return; 3602 } 3603 if (err > 0) { 3604 dev_warn(&h->pdev->dev, "only %d MSI-X vectors " 3605 "available\n", err); 3606 goto default_int_mode; 3607 } else { 3608 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", 3609 err); 3610 goto default_int_mode; 3611 } 3612 } 3613 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { 3614 dev_info(&h->pdev->dev, "MSI\n"); 3615 if (!pci_enable_msi(h->pdev)) 3616 h->msi_vector = 1; 3617 else 3618 dev_warn(&h->pdev->dev, "MSI init failed\n"); 3619 } 3620 default_int_mode: 3621 #endif /* CONFIG_PCI_MSI */ 3622 /* if we get here we're going to use the default interrupt mode */ 3623 h->intr[h->intr_mode] = h->pdev->irq; 3624 } 3625 3626 static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) 3627 { 3628 int i; 3629 u32 subsystem_vendor_id, subsystem_device_id; 3630 3631 subsystem_vendor_id = pdev->subsystem_vendor; 3632 subsystem_device_id = pdev->subsystem_device; 3633 *board_id = ((subsystem_device_id << 16) & 0xffff0000) | 3634 subsystem_vendor_id; 3635 3636 for (i = 0; i < ARRAY_SIZE(products); i++) 3637 if (*board_id == products[i].board_id) 3638 return i; 3639 3640 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP && 3641 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) || 3642 !hpsa_allow_any) { 3643 dev_warn(&pdev->dev, "unrecognized board ID: " 3644 "0x%08x, ignoring.\n", *board_id); 3645 return -ENODEV; 3646 } 3647 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */ 3648 } 3649 3650 static inline bool hpsa_board_disabled(struct pci_dev *pdev) 3651 { 3652 u16 command; 3653 3654 (void) pci_read_config_word(pdev, PCI_COMMAND, &command); 3655 return ((command & PCI_COMMAND_MEMORY) == 0); 3656 } 3657 3658 static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev, 3659 unsigned long *memory_bar) 3660 { 3661 int i; 3662 3663 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) 3664 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 3665 /* addressing mode bits already removed */ 3666 *memory_bar = pci_resource_start(pdev, i); 3667 dev_dbg(&pdev->dev, "memory BAR = %lx\n", 3668 *memory_bar); 3669 return 0; 3670 } 3671 dev_warn(&pdev->dev, "no memory BAR found\n"); 3672 return -ENODEV; 3673 } 3674 3675 static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev, 3676 void __iomem *vaddr, int wait_for_ready) 3677 { 3678 int i, iterations; 3679 u32 scratchpad; 3680 if (wait_for_ready) 3681 iterations = HPSA_BOARD_READY_ITERATIONS; 3682 else 3683 iterations = HPSA_BOARD_NOT_READY_ITERATIONS; 3684 3685 for (i = 0; i < iterations; i++) { 3686 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); 3687 if (wait_for_ready) { 3688 if (scratchpad == HPSA_FIRMWARE_READY) 3689 return 0; 3690 } else { 3691 
if (scratchpad != HPSA_FIRMWARE_READY) 3692 return 0; 3693 } 3694 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS); 3695 } 3696 dev_warn(&pdev->dev, "board not ready, timed out.\n"); 3697 return -ENODEV; 3698 } 3699 3700 static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev, 3701 void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index, 3702 u64 *cfg_offset) 3703 { 3704 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET); 3705 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET); 3706 *cfg_base_addr &= (u32) 0x0000ffff; 3707 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr); 3708 if (*cfg_base_addr_index == -1) { 3709 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n"); 3710 return -ENODEV; 3711 } 3712 return 0; 3713 } 3714 3715 static int __devinit hpsa_find_cfgtables(struct ctlr_info *h) 3716 { 3717 u64 cfg_offset; 3718 u32 cfg_base_addr; 3719 u64 cfg_base_addr_index; 3720 u32 trans_offset; 3721 int rc; 3722 3723 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, 3724 &cfg_base_addr_index, &cfg_offset); 3725 if (rc) 3726 return rc; 3727 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, 3728 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); 3729 if (!h->cfgtable) 3730 return -ENOMEM; 3731 rc = write_driver_ver_to_cfgtable(h->cfgtable); 3732 if (rc) 3733 return rc; 3734 /* Find performant mode table. */ 3735 trans_offset = readl(&h->cfgtable->TransMethodOffset); 3736 h->transtable = remap_pci_mem(pci_resource_start(h->pdev, 3737 cfg_base_addr_index)+cfg_offset+trans_offset, 3738 sizeof(*h->transtable)); 3739 if (!h->transtable) 3740 return -ENOMEM; 3741 return 0; 3742 } 3743 3744 static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) 3745 { 3746 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); 3747 3748 /* Limit commands in memory limited kdump scenario. */ 3749 if (reset_devices && h->max_commands > 32) 3750 h->max_commands = 32; 3751 3752 if (h->max_commands < 16) { 3753 dev_warn(&h->pdev->dev, "Controller reports " 3754 "max supported commands of %d, an obvious lie. " 3755 "Using 16. Ensure that firmware is up to date.\n", 3756 h->max_commands); 3757 h->max_commands = 16; 3758 } 3759 } 3760 3761 /* Interrogate the hardware for some limits: 3762 * max commands, max SG elements without chaining, and with chaining, 3763 * SG chain block size, etc. 3764 */ 3765 static void __devinit hpsa_find_board_params(struct ctlr_info *h) 3766 { 3767 hpsa_get_max_perf_mode_cmds(h); 3768 h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */ 3769 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); 3770 /* 3771 * Limit in-command s/g elements to 32 save dma'able memory. 
static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));

	/* Limit commands in memory limited kdump scenario. */
	if (reset_devices && h->max_commands > 32)
		h->max_commands = 32;

	if (h->max_commands < 16) {
		dev_warn(&h->pdev->dev, "Controller reports "
			"max supported commands of %d, an obvious lie. "
			"Using 16. Ensure that firmware is up to date.\n",
			h->max_commands);
		h->max_commands = 16;
	}
}

/* Interrogate the hardware for some limits:
 * max commands, max SG elements without chaining, and with chaining,
 * SG chain block size, etc.
 */
static void __devinit hpsa_find_board_params(struct ctlr_info *h)
{
	hpsa_get_max_perf_mode_cmds(h);
	h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
	/*
	 * Limit in-command s/g elements to 32 to save DMA-able memory.
	 * However, the spec says if 0, use 31.
	 */
	h->max_cmd_sg_entries = 31;
	if (h->maxsgentries > 512) {
		h->max_cmd_sg_entries = 32;
		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
		h->maxsgentries--; /* save one for chain pointer */
	} else {
		h->maxsgentries = 31; /* default to traditional values */
		h->chainsize = 0;
	}
}
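/*
 * Worked example of the SG chaining arithmetic above (the firmware
 * value is hypothetical): if the controller reports maxsgentries =
 * 1024, then max_cmd_sg_entries = 32, chainsize = 1024 - 32 + 1 = 993,
 * and maxsgentries is decremented to 1023 to reserve one embedded
 * entry for the chain pointer.  A controller reporting 512 or fewer
 * falls back to the traditional 31 embedded entries with no chaining.
 */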
static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
	if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
	    (readb(&h->cfgtable->Signature[1]) != 'I') ||
	    (readb(&h->cfgtable->Signature[2]) != 'S') ||
	    (readb(&h->cfgtable->Signature[3]) != 'S')) {
		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
		return false;
	}
	return true;
}

/* Need to enable prefetch in the SCSI core for 6400 in x86 */
static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
{
#ifdef CONFIG_X86
	u32 prefetch;

	prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
	prefetch |= 0x100;
	writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
#endif
}

/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
	u32 dma_prefetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}

static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* under certain very rare conditions, this can take a while.
	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
	 * as we enter this code.)
	 */
	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & CFGTBL_ChangeReq))
			break;
		/* delay and try again */
		usleep_range(10000, 20000);
	}
}

static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
{
	u32 trans_support;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & SIMPLE_MODE))
		return -ENOTSUPP;

	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
	/* Update the field, and then ring the doorbell */
	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	print_cfg_table(&h->pdev->dev, h->cfgtable);
	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
		dev_warn(&h->pdev->dev,
			"unable to get board into simple mode\n");
		return -ENODEV;
	}
	h->transMethod = CFGTBL_Trans_Simple;
	return 0;
}
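/*
 * The mode-change handshake above follows the CISS pattern used
 * throughout this file: write the requested transport method into
 * HostWrite.TransportRequest, ring CFGTBL_ChangeReq on the doorbell,
 * poll until the controller clears the doorbell bit, then read back
 * TransportActive to confirm the switch actually took effect.
 */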
static int __devinit hpsa_pci_init(struct ctlr_info *h)
{
	int prod_index, err;

	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
	if (prod_index < 0)
		return -ENODEV;
	h->product_name = products[prod_index].product_name;
	h->access = *(products[prod_index].access);

	if (hpsa_board_disabled(h->pdev)) {
		dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
		return -ENODEV;
	}
	err = pci_enable_device(h->pdev);
	if (err) {
		dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(h->pdev, "hpsa");
	if (err) {
		dev_err(&h->pdev->dev,
			"cannot obtain PCI resources, aborting\n");
		return err;
	}
	hpsa_interrupt_mode(h);
	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
	if (err)
		goto err_out_free_res;
	h->vaddr = remap_pci_mem(h->paddr, 0x250);
	if (!h->vaddr) {
		err = -ENOMEM;
		goto err_out_free_res;
	}
	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (err)
		goto err_out_free_res;
	err = hpsa_find_cfgtables(h);
	if (err)
		goto err_out_free_res;
	hpsa_find_board_params(h);

	if (!hpsa_CISS_signature_present(h)) {
		err = -ENODEV;
		goto err_out_free_res;
	}
	hpsa_enable_scsi_prefetch(h);
	hpsa_p600_dma_prefetch_quirk(h);
	err = hpsa_enter_simple_mode(h);
	if (err)
		goto err_out_free_res;
	return 0;

err_out_free_res:
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	if (h->vaddr)
		iounmap(h->vaddr);
	/*
	 * Deliberately omit pci_disable_device(): it does something nasty to
	 * Smart Array controllers that pci_enable_device does not undo
	 */
	pci_release_regions(h->pdev);
	return err;
}

static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
{
	int rc;

#define HBA_INQUIRY_BYTE_COUNT 64
	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
	if (!h->hba_inquiry_data)
		return;
	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
	if (rc != 0) {
		kfree(h->hba_inquiry_data);
		h->hba_inquiry_data = NULL;
	}
}

static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
{
	int rc, i;

	if (!reset_devices)
		return 0;

	/* Reset the controller with a PCI power-cycle or via doorbell */
	rc = hpsa_kdump_hard_reset_controller(pdev);

	/* -ENOTSUPP here means we cannot reset the controller
	 * but it's already (and still) up and running in
	 * "performant mode".  Or, it might be 640x, which can't reset
	 * due to concerns about shared bbwc between 6402/6404 pair.
	 */
	if (rc == -ENOTSUPP)
		return rc; /* just try to do the kdump anyhow. */
	if (rc)
		return -ENODEV;

	/* Now try to get the controller to respond to a no-op */
	dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			break;
		else
			dev_warn(&pdev->dev, "no-op failed%s\n",
				(i < HPSA_POST_RESET_NOOP_RETRIES - 1 ?
					"; re-trying" : ""));
	}
	return 0;
}

static __devinit int hpsa_allocate_cmd_pool(struct ctlr_info *h)
{
	h->cmd_pool_bits = kzalloc(
		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
		sizeof(unsigned long), GFP_KERNEL);
	h->cmd_pool = pci_alloc_consistent(h->pdev,
		h->nr_cmds * sizeof(*h->cmd_pool),
		&(h->cmd_pool_dhandle));
	h->errinfo_pool = pci_alloc_consistent(h->pdev,
		h->nr_cmds * sizeof(*h->errinfo_pool),
		&(h->errinfo_pool_dhandle));
	if ((h->cmd_pool_bits == NULL)
	    || (h->cmd_pool == NULL)
	    || (h->errinfo_pool == NULL)) {
		dev_err(&h->pdev->dev, "out of memory in %s", __func__);
		return -ENOMEM;
	}
	return 0;
}
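/*
 * Illustrative sizing for the allocations above (the nr_cmds value is
 * assumed): with nr_cmds = 1020 on a 64-bit kernel, cmd_pool_bits needs
 * DIV_ROUND_UP(1020, 64) = 16 unsigned longs (128 bytes) to track one
 * allocation bit per command, while cmd_pool and errinfo_pool each get
 * 1020 DMA-coherent entries of their respective structure sizes.
 */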
static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
	kfree(h->cmd_pool_bits);
	if (h->cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(struct CommandList),
			h->cmd_pool, h->cmd_pool_dhandle);
	if (h->errinfo_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(struct ErrorInfo),
			h->errinfo_pool,
			h->errinfo_pool_dhandle);
}

static int hpsa_request_irq(struct ctlr_info *h,
	irqreturn_t (*msixhandler)(int, void *),
	irqreturn_t (*intxhandler)(int, void *))
{
	int rc;

	if (h->msix_vector || h->msi_vector)
		rc = request_irq(h->intr[h->intr_mode], msixhandler,
				IRQF_DISABLED, h->devname, h);
	else
		rc = request_irq(h->intr[h->intr_mode], intxhandler,
				IRQF_DISABLED, h->devname, h);
	if (rc) {
		dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
			h->intr[h->intr_mode], h->devname);
		return -ENODEV;
	}
	return 0;
}

static int __devinit hpsa_kdump_soft_reset(struct ctlr_info *h)
{
	if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
		HPSA_RESET_TYPE_CONTROLLER)) {
		dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
		return -EIO;
	}

	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
		return -1;
	}

	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
		dev_warn(&h->pdev->dev, "Board failed to become ready "
			"after soft reset.\n");
		return -1;
	}

	return 0;
}

static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
	free_irq(h->intr[h->intr_mode], h);
#ifdef CONFIG_PCI_MSI
	if (h->msix_vector)
		pci_disable_msix(h->pdev);
	else if (h->msi_vector)
		pci_disable_msi(h->pdev);
#endif /* CONFIG_PCI_MSI */
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	kfree(h->blockFetchTable);
	pci_free_consistent(h->pdev, h->reply_pool_size,
		h->reply_pool, h->reply_pool_dhandle);
	if (h->vaddr)
		iounmap(h->vaddr);
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	pci_release_regions(h->pdev);
	kfree(h);
}
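/*
 * Why the 32-byte alignment check in hpsa_init_one() below matters
 * (illustrative): because sizeof(struct CommandList) is a multiple of
 * 32, every command in the DMA-coherent cmd_pool starts on a 32-byte
 * boundary, so the low 5 bits of each command's bus address are zero
 * and remain free for the hardware and driver to encode per-command
 * information such as the performant-mode block-fetch size.
 */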
static int __devinit hpsa_init_one(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	int dac, rc;
	struct ctlr_info *h;
	int try_soft_reset = 0;
	unsigned long flags;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_init_reset_devices(pdev);
	if (rc) {
		if (rc != -ENOTSUPP)
			return rc;
		/* If the reset fails in a particular way (it has no way to do
		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
		 * a soft reset once we get the controller configured up to the
		 * point that it can accept a command.
		 */
		try_soft_reset = 1;
		rc = 0;
	}

reinit_after_soft_reset:

	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
#define COMMANDLIST_ALIGNMENT 32
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->pdev = pdev;
	h->busy_initializing = 1;
	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->cmpQ);
	INIT_LIST_HEAD(&h->reqQ);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->scan_lock);
	rc = hpsa_pci_init(h);
	if (rc != 0)
		goto clean1;

	sprintf(h->devname, "hpsa%d", number_of_controllers);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc == 0) {
		dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc == 0) {
			dac = 0;
		} else {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean1;
		}
	}

	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
		goto clean2;
	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
		h->devname, pdev->device,
		h->intr[h->intr_mode], dac ? "" : " not");
	if (hpsa_allocate_cmd_pool(h))
		goto clean4;
	if (hpsa_allocate_sg_chain_blocks(h))
		goto clean4;
	init_waitqueue_head(&h->scan_wait_queue);
	h->scan_finished = 1; /* no scan currently in progress */

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;
	h->scsi_host = NULL;
	spin_lock_init(&h->devlock);
	hpsa_put_ctlr_into_performant_mode(h);

	/* At this point, the controller is ready to take commands.
	 * Now, if reset_devices and the hard reset didn't work, try
	 * the soft reset and see if that works.
	 */
	if (try_soft_reset) {

		/* This is kind of gross.  We may or may not get a completion
		 * from the soft reset command, and if we do, then the value
		 * from the fifo may or may not be valid.  So, we wait 10 secs
		 * after the reset throwing away any completions we get during
		 * that time.  Unregister the interrupt handler and register
		 * fake ones to scoop up any residual completions.
		 */
		spin_lock_irqsave(&h->lock, flags);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);
		spin_unlock_irqrestore(&h->lock, flags);
		free_irq(h->intr[h->intr_mode], h);
		rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
					hpsa_intx_discard_completions);
		if (rc) {
			dev_warn(&h->pdev->dev, "Failed to request_irq after "
				"soft reset.\n");
			goto clean4;
		}

		rc = hpsa_kdump_soft_reset(h);
		if (rc)
			/* Neither hard nor soft reset worked, we're hosed. */
			goto clean4;

		dev_info(&h->pdev->dev, "Board READY.\n");
		dev_info(&h->pdev->dev,
			"Waiting for stale completions to drain.\n");
		h->access.set_intr_mask(h, HPSA_INTR_ON);
		msleep(10000);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);

		rc = controller_reset_failed(h->cfgtable);
		if (rc)
			dev_info(&h->pdev->dev,
				"Soft reset appears to have failed.\n");

		/* since the controller's reset, we have to go back and re-init
		 * everything.  Easiest to just forget what we've done and do it
		 * all over again.
		 */
		hpsa_undo_allocations_after_kdump_soft_reset(h);
		try_soft_reset = 0;
		if (rc)
			/* don't go to clean4, we already unallocated */
			return -ENODEV;

		goto reinit_after_soft_reset;
	}

	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_hba_inquiry(h);
	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
	h->busy_initializing = 0;
	return 1;

clean4:
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	free_irq(h->intr[h->intr_mode], h);
clean2:
clean1:
	h->busy_initializing = 0;
	kfree(h);
	return rc;
}
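/* Flush the controller's battery-backed write cache out to disk, using
 * a HPSA_CACHE_FLUSH command addressed to the controller LUN.  Called
 * at shutdown so no cached writes are lost when power drops.
 */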
static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;

	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

	c = cmd_special_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		goto out_of_memory;
	}
	fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD);
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
	if (c->err_info->CommandStatus != 0)
		dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
	cmd_special_free(h, c);
out_of_memory:
	kfree(flush_buf);
}

static void hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Send the flush cache command first, then turn board interrupts
	 * off; flushing writes all data in the battery-backed cache to disk.
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	free_irq(h->intr[h->intr_mode], h);
#ifdef CONFIG_PCI_MSI
	if (h->msix_vector)
		pci_disable_msix(h->pdev);
	else if (h->msi_vector)
		pci_disable_msi(h->pdev);
#endif /* CONFIG_PCI_MSI */
}

static void __devexit hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);
	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
	hpsa_shutdown(pdev);
	iounmap(h->vaddr);
	iounmap(h->transtable);
	iounmap(h->cfgtable);
	hpsa_free_sg_chain_blocks(h);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct CommandList),
		h->cmd_pool, h->cmd_pool_dhandle);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct ErrorInfo),
		h->errinfo_pool, h->errinfo_pool_dhandle);
	pci_free_consistent(h->pdev, h->reply_pool_size,
		h->reply_pool, h->reply_pool_dhandle);
	kfree(h->cmd_pool_bits);
	kfree(h->blockFetchTable);
	kfree(h->hba_inquiry_data);
	/*
	 * Deliberately omit pci_disable_device(): it does something nasty to
	 * Smart Array controllers that pci_enable_device does not undo
	 */
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	kfree(h);
}

static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return -ENOSYS;
}

static struct pci_driver hpsa_pci_driver = {
	.name = "hpsa",
	.probe = hpsa_init_one,
	.remove = __devexit_p(hpsa_remove_one),
	.id_table = hpsa_pci_device_id,	/* id_table */
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};

/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers.  The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.  This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes.  The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int *bucket_map)
{
	int i, j, b, size;

	/* even a command with 0 SGs requires 4 blocks */
#define MINIMUM_TRANSFER_BLOCKS 4
#define NUM_BUCKETS 8
	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + MINIMUM_TRANSFER_BLOCKS;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < NUM_BUCKETS; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}
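/*
 * Worked example for calc_bucket_map() (using the bft[] values passed
 * in below): a command with 3 SG entries needs 3 + 4 = 7 sixteen-byte
 * blocks; the first bucket >= 7 is bucket[2] = 8, so bucket_map[3] = 2
 * and the controller fetches 8 * 16 = 128 bytes for that command.
 */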
static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
	u32 use_short_tags)
{
	int i;
	unsigned long register_value;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to to tell it 8 different
	 * sizes of commands which there may be.  It's a way of
	 * reducing the DMA done to fetch each command.  Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within.  The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16 byte blocks.
	 * The largest command possible requires MAXSGENTRIES + 4 16-byte
	 * blocks.  Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements.  bft[] contains the eight values we write to
	 * the registers.  They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
	BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
	/*  5 = 1 s/g entry or 4k
	 *  6 = 2 s/g entry or 8k
	 *  8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */

	h->reply_pool_wraparound = 1; /* spec: init to 1 */

	/* Controller spec: zero out this buffer. */
	memset(h->reply_pool, 0, h->reply_pool_size);
	h->reply_pool_head = h->reply_pool;

	bft[7] = h->max_sg_entries + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(1, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);
	writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
	writel(0, &h->transtable->RepQAddr0High32);
	writel(CFGTBL_Trans_Performant | use_short_tags,
		&(h->cfgtable->HostWrite.TransportRequest));
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_warn(&h->pdev->dev, "unable to get board into"
					" performant mode\n");
		return;
	}
	/* Change the access methods to the performant access methods */
	h->access = SA5_performant_access;
	h->transMethod = CFGTBL_Trans_Performant;
}
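/*
 * Illustrative sizing for the performant-mode setup below (assuming the
 * controller reports max_commands = 256): the reply ring needs
 * 256 * sizeof(u64) = 2048 bytes of DMA-coherent memory, one 8-byte tag
 * slot per outstanding command, and blockFetchTable needs
 * (32 + 1) * sizeof(u32) = 132 bytes, one entry per possible embedded
 * SG count from 0 through max_sg_entries.
 */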
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;

	if (hpsa_simple_mode)
		return;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	hpsa_get_max_perf_mode_cmds(h);
	h->max_sg_entries = 32;
	/* Performant mode ring buffer and supporting data structures */
	h->reply_pool_size = h->max_commands * sizeof(u64);
	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
		&(h->reply_pool_dhandle));

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
		sizeof(u32)), GFP_KERNEL);

	if ((h->reply_pool == NULL)
		|| (h->blockFetchTable == NULL))
		goto clean_up;

	hpsa_enter_performant_mode(h,
		trans_support & CFGTBL_Trans_use_short_tags);

	return;

clean_up:
	if (h->reply_pool)
		pci_free_consistent(h->pdev, h->reply_pool_size,
			h->reply_pool, h->reply_pool_dhandle);
	kfree(h->blockFetchTable);
}

/*
 * This is it.  Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one
 * of our cards.
 */
static int __init hpsa_init(void)
{
	return pci_register_driver(&hpsa_pci_driver);
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);