/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <asm/atomic.h>
#include <linux/kthread.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "2.0.2-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
	"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/* board_id = subsystem device id (upper 16 bits) concatenated with the
 *            subsystem vendor id (lower 16 bits); for example, 0x3241103C
 *            is subsystem device 0x3241 from vendor 0x103C (HP)
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324a103C, "Smart Array P712m", &SA5_access},
	{0x324b103C, "Smart Array P711m", &SA5_access},
	{0x3350103C, "Smart Array", &SA5_access},
	{0x3351103C, "Smart Array", &SA5_access},
	{0x3352103C, "Smart Array", &SA5_access},
	{0x3353103C, "Smart Array", &SA5_access},
	{0x3354103C, "Smart Array", &SA5_access},
	{0x3355103C, "Smart Array", &SA5_access},
	{0x3356103C, "Smart Array", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);
#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
	int cmd_type);

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int *bucket_map);
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h);
static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
	void __iomem *vaddr, int wait_for_ready);
#define BOARD_NOT_READY 0
#define BOARD_READY 1

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, "hpsa%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
			"detected, action required\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
			"changed, action required\n", h->ctlr);
		/*
		 * Note: this REPORT_LUNS_CHANGED condition only occurs on
		 * the MSA2012.
		 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, "hpsa%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, "hpsa%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}
273 "performant" : "simple"); 274 } 275 276 /* List of controllers which cannot be reset on kexec with reset_devices */ 277 static u32 unresettable_controller[] = { 278 0x324a103C, /* Smart Array P712m */ 279 0x324b103C, /* SmartArray P711m */ 280 0x3223103C, /* Smart Array P800 */ 281 0x3234103C, /* Smart Array P400 */ 282 0x3235103C, /* Smart Array P400i */ 283 0x3211103C, /* Smart Array E200i */ 284 0x3212103C, /* Smart Array E200 */ 285 0x3213103C, /* Smart Array E200i */ 286 0x3214103C, /* Smart Array E200i */ 287 0x3215103C, /* Smart Array E200i */ 288 0x3237103C, /* Smart Array E500 */ 289 0x323D103C, /* Smart Array P700m */ 290 0x409C0E11, /* Smart Array 6400 */ 291 0x409D0E11, /* Smart Array 6400 EM */ 292 }; 293 294 static int ctlr_is_resettable(struct ctlr_info *h) 295 { 296 int i; 297 298 for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++) 299 if (unresettable_controller[i] == h->board_id) 300 return 0; 301 return 1; 302 } 303 304 static ssize_t host_show_resettable(struct device *dev, 305 struct device_attribute *attr, char *buf) 306 { 307 struct ctlr_info *h; 308 struct Scsi_Host *shost = class_to_shost(dev); 309 310 h = shost_to_hba(shost); 311 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h)); 312 } 313 314 static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[]) 315 { 316 return (scsi3addr[3] & 0xC0) == 0x40; 317 } 318 319 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", 320 "UNKNOWN" 321 }; 322 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1) 323 324 static ssize_t raid_level_show(struct device *dev, 325 struct device_attribute *attr, char *buf) 326 { 327 ssize_t l = 0; 328 unsigned char rlevel; 329 struct ctlr_info *h; 330 struct scsi_device *sdev; 331 struct hpsa_scsi_dev_t *hdev; 332 unsigned long flags; 333 334 sdev = to_scsi_device(dev); 335 h = sdev_to_hba(sdev); 336 spin_lock_irqsave(&h->lock, flags); 337 hdev = sdev->hostdata; 338 if (!hdev) { 339 spin_unlock_irqrestore(&h->lock, flags); 340 return -ENODEV; 341 } 342 343 /* Is this even a logical drive? 
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	NULL,
};

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= "hpsa",
	.proc_name		= "hpsa",
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs		= hpsa_sdev_attrs,
	.shost_attrs		= hpsa_shost_attrs,
};
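/*
 * A sketch of the performant-mode transport, as implemented below: when
 * CFGTBL_Trans_Performant is active, the controller posts completed
 * command tags to a reply ring in host memory instead of requiring a
 * register read per completion.  Bit 0 of each ring entry is a cycle bit
 * that matches h->reply_pool_wraparound for entries written during the
 * current pass over the ring, which is how next_command() distinguishes
 * fresh tags from stale ones left over from the previous pass.  On the
 * submit side, set_performant_mode() sets bit 0 of the tag for the pull
 * model and places a block fetch table index (derived from the command's
 * SG count) in bits 3:1, indicating how much of the command the
 * controller should fetch up front.
 */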
/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
	list_add_tail(&c->list, list);
}

static inline u32 next_command(struct ctlr_info *h)
{
	u32 a;

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h);

	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
		a = *(h->reply_pool_head); /* Next cmd in ring buffer */
		(h->reply_pool_head)++;
		h->commands_outstanding--;
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
		h->reply_pool_head = h->reply_pool;
		h->reply_pool_wraparound ^= 1;
	}
	return a;
}

/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant))
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	set_performant_mode(h, c);
	spin_lock_irqsave(&h->lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	start_io(h);
	spin_unlock_irqrestore(&h->lock, flags);
}

static inline void removeQ(struct CommandList *c)
{
	if (WARN_ON(list_empty(&c->list)))
		return;
	list_del_init(&c->list);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);

	memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			set_bit(h->dev[i]->target, lun_taken);
	}

	for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
		if (!test_bit(i, lun_taken)) {
			/* *bus = 1; */
			*target = i;
			*lun = 0;
			found = 1;
			break;
		}
	}
	return !found;
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *device,
	struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
		scsi_device_type(device->devtype), hostno,
		device->bus, device->target, device->lun);
	return 0;
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;
	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}

#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.  If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i]))
				return DEVICE_SAME;
			else
				return DEVICE_CHANGED;
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}

static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
		GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
		GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */

	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
			h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * future cmds to this device will get selection
			 * timeout as if the device was gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				" for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (sd != NULL)
		sdev->hostdata = sd;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}
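/*
 * Scatter-gather chaining, in brief: each CommandList has room for
 * h->max_cmd_sg_entries embedded SG descriptors.  For requests with more
 * segments than that, the last embedded descriptor is marked
 * HPSA_SG_CHAIN and points (via the DMA mapping set up in
 * hpsa_map_sg_chain_block()) at a per-command chain block, allocated once
 * per command slot in hpsa_allocate_sg_chain_blocks(), which holds the
 * remaining descriptors.
 */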
static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
		GFP_KERNEL);
	if (!h->cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
			h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}

static void hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = HPSA_SG_CHAIN;
	chain_sg->Len = sizeof(*chain_sg) *
		(c->Header.SGTotal - h->max_cmd_sg_entries);
	temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
		PCI_DMA_TODEVICE);
	chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
	chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;
	union u64bit temp64;

	if (c->Header.SGTotal <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	temp64.val32.lower = chain_sg->Addr.lower;
	temp64.val32.upper = chain_sg->Addr.upper;
	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}

static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;

	unsigned char sense_key;
	unsigned char asc;      /* additional sense code */
	unsigned char ascq;     /* additional sense code qualifier */

	ei = cp->err_info;
	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
	h = cp->h;

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if (cp->Header.SGTotal > h->max_cmd_sg_entries)
		hpsa_unmap_sg_chain_block(h, cp);

	cmd->result = (DID_OK << 16); 		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
	cmd->result |= ei->ScsiStatus;

	/* copy the sense data whether we need to or not. */
	memcpy(cmd->sense_buffer, ei->SenseInfo,
		ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
			SCSI_SENSE_BUFFERSIZE :
			ei->SenseLen);
	scsi_set_resid(cmd, ei->ResidualCnt);

	if (ei->CommandStatus == 0) {
		cmd->scsi_done(cmd);
		cmd_free(h, cp);
		return;
	}

	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		if (ei->ScsiStatus) {
			/* Get sense key */
			sense_key = 0xf & ei->SenseInfo[2];
			/* Get additional sense code */
			asc = ei->SenseInfo[12];
			/* Get additional sense code qualifier */
			ascq = ei->SenseInfo[13];
		}

		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			if (check_for_unit_attention(h, cp)) {
				cmd->result = DID_SOFT_ERROR << 16;
				break;
			}
			if (sense_key == ILLEGAL_REQUEST) {
				/*
				 * SCSI REPORT_LUNS is commonly unsupported on
				 * Smart Array.  Suppress noisy complaint.
				 */
				if (cp->Request.CDB[0] == REPORT_LUNS)
					break;

				/* If ASC/ASCQ indicate Logical Unit
				 * Not Supported condition,
				 */
				if ((asc == 0x25) && (ascq == 0x0)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition\n", cp);
					break;
				}
			}

			if (sense_key == NOT_READY) {
				/* If Sense is Not Ready, Logical Unit
				 * Not ready, Manual Intervention
				 * required
				 */
				if ((asc == 0x04) && (ascq == 0x03)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition: unit "
						"not ready, manual "
						"intervention required\n", cp);
					break;
				}
			}
			if (sense_key == ABORTED_COMMAND) {
				/* Aborted command is retryable */
				dev_warn(&h->pdev->dev, "cp %p "
					"has check condition: aborted command: "
					"ASC: 0x%x, ASCQ: 0x%x\n",
					cp, asc, ascq);
				cmd->result = DID_SOFT_ERROR << 16;
				break;
			}
			/* Must be some other type of check condition */
			dev_warn(&h->pdev->dev, "cp %p has check condition: "
					"unknown type: "
					"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
					"Returning result: 0x%x, "
					"cmd=[%02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x]\n",
					cp, sense_key, asc, ascq,
					cmd->result,
					cmd->cmnd[0], cmd->cmnd[1],
					cmd->cmnd[2], cmd->cmnd[3],
					cmd->cmnd[4], cmd->cmnd[5],
					cmd->cmnd[6], cmd->cmnd[7],
					cmd->cmnd[8], cmd->cmnd[9],
					cmd->cmnd[10], cmd->cmnd[11],
					cmd->cmnd[12], cmd->cmnd[13],
					cmd->cmnd[14], cmd->cmnd[15]);
			break;
		}


		/* Problem was not a check condition
		 * Pass it up to the upper layers...
		 */
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
		} else {  /* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);

			/* Ordinarily, this case should never happen,
			 * but there is a bug in some released firmware
			 * revisions that allows it to happen if, for
			 * example, a 4100 backplane loses power and
			 * the tape drive is in it.  We assume that
			 * it's a fatal error of some kind because we
			 * can't show that it wasn't.  We will make it
			 * look like selection timeout since that is
			 * the most common reason for this to occur,
			 * and it's severe enough.
			 */

			cmd->result = DID_NO_CONNECT << 16;
		}
		break;

	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev, "cp %p has"
			" completed with data overrun "
			"reported\n", cp);
		break;
	case CMD_INVALID: {
		/* print_bytes(cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response).  You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target. */
		cmd->result = DID_NO_CONNECT << 16;
		}
		break;
	case CMD_PROTOCOL_ERR:
		dev_warn(&h->pdev->dev, "cp %p has protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		cmd->result = DID_ABORT << 16;
		dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
			cp, ei->ScsiStatus);
		break;
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_RESET << 16;
		dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
			"abort\n", cp);
		break;
	case CMD_TIMEOUT:
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "Command unabortable\n");
		break;
	default:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
			cp, ei->CommandStatus);
	}
	cmd->scsi_done(cmd);
	cmd_free(h, cp);
}

static int hpsa_scsi_detect(struct ctlr_info *h)
{
	struct Scsi_Host *sh;
	int error;

	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
	if (sh == NULL)
		goto fail;

	sh->io_port = 0;
	sh->n_io_port = 0;
	sh->this_id = -1;
	sh->max_channel = 3;
	sh->max_cmd_len = MAX_COMMAND_SIZE;
	sh->max_lun = HPSA_MAX_LUN;
	sh->max_id = HPSA_MAX_LUN;
	sh->can_queue = h->nr_cmds;
	sh->cmd_per_lun = h->nr_cmds;
	sh->sg_tablesize = h->maxsgentries;
	h->scsi_host = sh;
	sh->hostdata[0] = (unsigned long) h;
	sh->irq = h->intr[h->intr_mode];
	sh->unique_id = sh->irq;
	error = scsi_add_host(sh, &h->pdev->dev);
	if (error)
		goto fail_host_put;
	scsi_scan_host(sh);
	return 0;

fail_host_put:
	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
		" failed for controller %d\n", h->ctlr);
	scsi_host_put(sh);
	return error;
fail:
	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
		" failed for controller %d\n", h->ctlr);
	return -ENOMEM;
}

static void hpsa_pci_unmap(struct pci_dev *pdev,
	struct CommandList *c, int sg_used, int data_direction)
{
	int i;
	union u64bit addr64;

	for (i = 0; i < sg_used; i++) {
		addr64.val32.lower = c->SG[i].Addr.lower;
		addr64.val32.upper = c->SG[i].Addr.upper;
		pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
			data_direction);
	}
}

static void hpsa_map_one(struct pci_dev *pdev,
		struct CommandList *cp,
		unsigned char *buf,
		size_t buflen,
		int data_direction)
{
	u64 addr64;

	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
		cp->Header.SGList = 0;
		cp->Header.SGTotal = 0;
		return;
	}

	addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
	cp->SG[0].Addr.lower =
		(u32) (addr64 & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Addr.upper =
		(u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Len = buflen;
	cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
}

static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	enqueue_cmd_and_start_io(h, c);
	wait_for_completion(&wait);
}
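/*
 * A command that comes back with a unit attention (device reset, LUN
 * inventory change, and so on) is retried below a bounded number of
 * times; check_for_unit_attention() logs the specific reason reported
 * in the sense data.
 */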
static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction)
{
	int retry_count = 0;

	do {
		memset(c->err_info, 0, sizeof(*c->err_info));
		hpsa_scsi_do_simple_cmd_core(h, c);
		retry_count++;
	} while (check_for_unit_attention(h, c) &&
		retry_count <= MAX_CMD_RETRIES);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}

static void hpsa_scsi_interpret_error(struct CommandList *cp)
{
	struct ErrorInfo *ei;
	struct device *d = &cp->h->pdev->dev;

	ei = cp->err_info;
	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		dev_warn(d, "cmd %p has completed with errors\n", cp);
		dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
			ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero.  "
				"(probably indicates selection timeout "
				"reported incorrectly due to a known "
				"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		dev_info(d, "UNDERRUN\n");
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(d, "cp %p has completed with data overrun\n", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		dev_warn(d, "cp %p is reported invalid (probably means "
			"target device no longer present)\n", cp);
		/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
		print_cmd(cp);  */
		}
		break;
	case CMD_PROTOCOL_ERR:
		dev_warn(d, "cp %p has protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		/* cmd->result = DID_ERROR << 16; */
		dev_warn(d, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		dev_warn(d, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		dev_warn(d, "cp %p was aborted\n", cp);
		break;
	case CMD_ABORT_FAILED:
		dev_warn(d, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
		break;
	case CMD_TIMEOUT:
		dev_warn(d, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		dev_warn(d, "Command unabortable\n");
		break;
	default:
		dev_warn(d, "cp %p returned unknown status %x\n", cp,
			ei->CommandStatus);
	}
}

static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char page, unsigned char *buf,
	unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) {			/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) {			/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
	hpsa_scsi_do_simple_cmd_core(h, c);
	/* no unmap needed here because no data xfer. */
	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
	kfree(buf);
	return;
}

/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int buflen)
{
	int rc;
	unsigned char *buf;

	if (buflen > 16)
		buflen = 16;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -1;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
	if (rc == 0)
		memcpy(device_id, &buf[8], buflen);
	kfree(buf);
	return rc != 0;
}

static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
		struct ReportLUNdata *buf, int bufsize,
		int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);
	if (c == NULL) {			/* trouble... */
		dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -1;
	}
	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD);
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf,
		int bufsize, int extended_response)
{
	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}

static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}
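/*
 * Device discovery (hpsa_update_device_info() below) issues up to three
 * inquiries per device: a standard INQUIRY for the peripheral device
 * type, vendor, and model; VPD page 0x83 for the unique device id; and,
 * for logical drives, the vendor-specific page 0xC1 to read the RAID
 * level.
 */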
static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
{
#define OBDR_TAPE_INQ_SIZE 49
	unsigned char *inq_buff;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff)
		goto bail_out;

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		/* Inquiry failed (msg printed already) */
		dev_err(&h->pdev->dev,
			"hpsa_update_device_info: inquiry failed\n");
		goto bail_out;
	}

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
		sizeof(this_device->device_id));

	if (this_device->devtype == TYPE_DISK &&
		is_logical_dev_addr_mode(scsi3addr))
		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
	else
		this_device->raid_level = RAID_UNKNOWN;

	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return 1;
}

static unsigned char *msa2xxx_model[] = {
	"MSA2012",
	"MSA2024",
	"MSA2312",
	"MSA2324",
	NULL,
};

static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int i;

	for (i = 0; msa2xxx_model[i]; i++)
		if (strncmp(device->model, msa2xxx_model[i],
			strlen(msa2xxx_model[i])) == 0)
			return 1;
	return 0;
}

/* Helper function to assign bus, target, lun mapping of devices.
 * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
 * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, int *bus, int *target, int *lun,
	struct hpsa_scsi_dev_t *device)
{
	u32 lunid;

	if (is_logical_dev_addr_mode(lunaddrbytes)) {
		/* logical device */
		if (unlikely(is_scsi_rev_5(h))) {
			/* p1210m, logical drives lun assignments
			 * match SCSI REPORT LUNS data.
			 */
			lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
			*bus = 0;
			*target = 0;
			*lun = (lunid & 0x3fff) + 1;
		} else {
			/* not p1210m... */
			lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
			if (is_msa2xxx(h, device)) {
				/* msa2xxx way, put logicals on bus 1
				 * and match target/lun numbers box
				 * reports.
				 */
				*bus = 1;
				*target = (lunid >> 16) & 0x3fff;
				*lun = lunid & 0x00ff;
			} else {
				/* Traditional smart array way. */
				*bus = 0;
				*lun = 0;
				*target = lunid & 0x3fff;
			}
		}
	} else {
		/* physical device */
		if (is_hba_lunid(lunaddrbytes))
			if (unlikely(is_scsi_rev_5(h))) {
				*bus = 0; /* put p1210m ctlr at 0,0,0 */
				*target = 0;
				*lun = 0;
				return;
			} else
				*bus = 3; /* traditional smartarray */
		else
			*bus = 2; /* physical disk */
		*target = -1;
		*lun = -1; /* we will fill these in later. */
	}
}

/*
 * If there is no lun 0 on a target, linux won't find any devices.
 * For the MSA2xxx boxes, we have to manually detect the enclosure
 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
 * it for some reason.
 * *tmpdevice is the target we're adding,
 * this_device is a pointer into the current element of currentsd[]
 * that we're building up in update_scsi_devices(), below.
 * lunzerobits is a bitmap that tracks which targets already have a
 * lun 0 assigned.
 * Returns 1 if an enclosure was added, 0 if not.
 */
static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *tmpdevice,
	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
	int bus, int target, int lun, unsigned long lunzerobits[],
	int *nmsa2xxx_enclosures)
{
	unsigned char scsi3addr[8];

	if (test_bit(target, lunzerobits))
		return 0; /* There is already a lun 0 on this target. */

	if (!is_logical_dev_addr_mode(lunaddrbytes))
		return 0; /* It's the logical targets that may lack lun 0. */

	if (!is_msa2xxx(h, tmpdevice))
		return 0; /* It's only the MSA2xxx that have this problem. */

	if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
		return 0;

	memset(scsi3addr, 0, 8);
	scsi3addr[3] = target;
	if (is_hba_lunid(scsi3addr))
		return 0; /* Don't add the RAID controller here. */

	if (is_scsi_rev_5(h))
		return 0; /* p1210m doesn't need to do this. */

#define MAX_MSA2XXX_ENCLOSURES 32
	if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
		dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
			"enclosures exceeded.  Check your hardware "
			"configuration.\n");
		return 0;
	}

	if (hpsa_update_device_info(h, scsi3addr, this_device))
		return 0;
	(*nmsa2xxx_enclosures)++;
	hpsa_set_bus_target_lun(this_device, bus, target, 0);
	set_bit(target, lunzerobits);
	return 1;
}
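/*
 * CISS_REPORT_PHYS and CISS_REPORT_LOG return data in standard SCSI
 * REPORT LUNS format: a big-endian 4-byte LUN list length (in bytes)
 * followed by 8-byte LUN entries, hence the be32_to_cpu(...) / 8 in
 * hpsa_gather_lun_info() below when computing the LUN counts.
 */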
" 1730 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, 1731 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN); 1732 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals; 1733 } 1734 return 0; 1735 } 1736 1737 u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, 1738 int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list, 1739 struct ReportLUNdata *logdev_list) 1740 { 1741 /* Helper function, figure out where the LUN ID info is coming from 1742 * given index i, lists of physical and logical devices, where in 1743 * the list the raid controller is supposed to appear (first or last) 1744 */ 1745 1746 int logicals_start = nphysicals + (raid_ctlr_position == 0); 1747 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0); 1748 1749 if (i == raid_ctlr_position) 1750 return RAID_CTLR_LUNID; 1751 1752 if (i < logicals_start) 1753 return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0]; 1754 1755 if (i < last_device) 1756 return &logdev_list->LUN[i - nphysicals - 1757 (raid_ctlr_position == 0)][0]; 1758 BUG(); 1759 return NULL; 1760 } 1761 1762 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) 1763 { 1764 /* the idea here is we could get notified 1765 * that some devices have changed, so we do a report 1766 * physical luns and report logical luns cmd, and adjust 1767 * our list of devices accordingly. 1768 * 1769 * The scsi3addr's of devices won't change so long as the 1770 * adapter is not reset. That means we can rescan and 1771 * tell which devices we already know about, vs. new 1772 * devices, vs. disappearing devices. 1773 */ 1774 struct ReportLUNdata *physdev_list = NULL; 1775 struct ReportLUNdata *logdev_list = NULL; 1776 unsigned char *inq_buff = NULL; 1777 u32 nphysicals = 0; 1778 u32 nlogicals = 0; 1779 u32 ndev_allocated = 0; 1780 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; 1781 int ncurrent = 0; 1782 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8; 1783 int i, nmsa2xxx_enclosures, ndevs_to_allocate; 1784 int bus, target, lun; 1785 int raid_ctlr_position; 1786 DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR); 1787 1788 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA, 1789 GFP_KERNEL); 1790 physdev_list = kzalloc(reportlunsize, GFP_KERNEL); 1791 logdev_list = kzalloc(reportlunsize, GFP_KERNEL); 1792 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); 1793 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); 1794 1795 if (!currentsd || !physdev_list || !logdev_list || 1796 !inq_buff || !tmpdevice) { 1797 dev_err(&h->pdev->dev, "out of memory\n"); 1798 goto out; 1799 } 1800 memset(lunzerobits, 0, sizeof(lunzerobits)); 1801 1802 if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals, 1803 logdev_list, &nlogicals)) 1804 goto out; 1805 1806 /* We might see up to 32 MSA2xxx enclosures, actually 8 of them 1807 * but each of them 4 times through different paths. The plus 1 1808 * is for the RAID controller. 
1809 */ 1810 ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1; 1811 1812 /* Allocate the per device structures */ 1813 for (i = 0; i < ndevs_to_allocate; i++) { 1814 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL); 1815 if (!currentsd[i]) { 1816 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n", 1817 __FILE__, __LINE__); 1818 goto out; 1819 } 1820 ndev_allocated++; 1821 } 1822 1823 if (unlikely(is_scsi_rev_5(h))) 1824 raid_ctlr_position = 0; 1825 else 1826 raid_ctlr_position = nphysicals + nlogicals; 1827 1828 /* adjust our table of devices */ 1829 nmsa2xxx_enclosures = 0; 1830 for (i = 0; i < nphysicals + nlogicals + 1; i++) { 1831 u8 *lunaddrbytes; 1832 1833 /* Figure out where the LUN ID info is coming from */ 1834 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, 1835 i, nphysicals, nlogicals, physdev_list, logdev_list); 1836 /* skip masked physical devices. */ 1837 if (lunaddrbytes[3] & 0xC0 && 1838 i < nphysicals + (raid_ctlr_position == 0)) 1839 continue; 1840 1841 /* Get device type, vendor, model, device id */ 1842 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice)) 1843 continue; /* skip it if we can't talk to it. */ 1844 figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun, 1845 tmpdevice); 1846 this_device = currentsd[ncurrent]; 1847 1848 /* 1849 * For the msa2xxx boxes, we have to insert a LUN 0 which 1850 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there 1851 * is nonetheless an enclosure device there. We have to 1852 * present that otherwise linux won't find anything if 1853 * there is no lun 0. 1854 */ 1855 if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device, 1856 lunaddrbytes, bus, target, lun, lunzerobits, 1857 &nmsa2xxx_enclosures)) { 1858 ncurrent++; 1859 this_device = currentsd[ncurrent]; 1860 } 1861 1862 *this_device = *tmpdevice; 1863 hpsa_set_bus_target_lun(this_device, bus, target, lun); 1864 1865 switch (this_device->devtype) { 1866 case TYPE_ROM: { 1867 /* We don't *really* support actual CD-ROM devices, 1868 * just "One Button Disaster Recovery" tape drive 1869 * which temporarily pretends to be a CD-ROM drive. 1870 * So we check that the device is really an OBDR tape 1871 * device by checking for "$DR-10" in bytes 43-48 of 1872 * the inquiry data. 1873 */ 1874 char obdr_sig[7]; 1875 #define OBDR_TAPE_SIG "$DR-10" 1876 strncpy(obdr_sig, &inq_buff[43], 6); 1877 obdr_sig[6] = '\0'; 1878 if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0) 1879 /* Not OBDR device, ignore it. */ 1880 break; 1881 } 1882 ncurrent++; 1883 break; 1884 case TYPE_DISK: 1885 if (i < nphysicals) 1886 break; 1887 ncurrent++; 1888 break; 1889 case TYPE_TAPE: 1890 case TYPE_MEDIUM_CHANGER: 1891 ncurrent++; 1892 break; 1893 case TYPE_RAID: 1894 /* Only present the Smartarray HBA as a RAID controller. 1895 * If it's a RAID controller other than the HBA itself 1896 * (an external RAID controller, MSA500 or similar) 1897 * don't present it. 
1898 */ 1899 if (!is_hba_lunid(lunaddrbytes)) 1900 break; 1901 ncurrent++; 1902 break; 1903 default: 1904 break; 1905 } 1906 if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA) 1907 break; 1908 } 1909 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); 1910 out: 1911 kfree(tmpdevice); 1912 for (i = 0; i < ndev_allocated; i++) 1913 kfree(currentsd[i]); 1914 kfree(currentsd); 1915 kfree(inq_buff); 1916 kfree(physdev_list); 1917 kfree(logdev_list); 1918 } 1919 1920 /* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci 1921 * dma mapping and fills in the scatter gather entries of the 1922 * hpsa command, cp. 1923 */ 1924 static int hpsa_scatter_gather(struct ctlr_info *h, 1925 struct CommandList *cp, 1926 struct scsi_cmnd *cmd) 1927 { 1928 unsigned int len; 1929 struct scatterlist *sg; 1930 u64 addr64; 1931 int use_sg, i, sg_index, chained; 1932 struct SGDescriptor *curr_sg; 1933 1934 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); 1935 1936 use_sg = scsi_dma_map(cmd); 1937 if (use_sg < 0) 1938 return use_sg; 1939 1940 if (!use_sg) 1941 goto sglist_finished; 1942 1943 curr_sg = cp->SG; 1944 chained = 0; 1945 sg_index = 0; 1946 scsi_for_each_sg(cmd, sg, use_sg, i) { 1947 if (i == h->max_cmd_sg_entries - 1 && 1948 use_sg > h->max_cmd_sg_entries) { 1949 chained = 1; 1950 curr_sg = h->cmd_sg_list[cp->cmdindex]; 1951 sg_index = 0; 1952 } 1953 addr64 = (u64) sg_dma_address(sg); 1954 len = sg_dma_len(sg); 1955 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); 1956 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); 1957 curr_sg->Len = len; 1958 curr_sg->Ext = 0; /* we are not chaining */ 1959 curr_sg++; 1960 } 1961 1962 if (use_sg + chained > h->maxSG) 1963 h->maxSG = use_sg + chained; 1964 1965 if (chained) { 1966 cp->Header.SGList = h->max_cmd_sg_entries; 1967 cp->Header.SGTotal = (u16) (use_sg + 1); 1968 hpsa_map_sg_chain_block(h, cp); 1969 return 0; 1970 } 1971 1972 sglist_finished: 1973 1974 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */ 1975 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */ 1976 return 0; 1977 } 1978 1979 1980 static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, 1981 void (*done)(struct scsi_cmnd *)) 1982 { 1983 struct ctlr_info *h; 1984 struct hpsa_scsi_dev_t *dev; 1985 unsigned char scsi3addr[8]; 1986 struct CommandList *c; 1987 unsigned long flags; 1988 1989 /* Get the ptr to our adapter structure out of cmd->host. */ 1990 h = sdev_to_hba(cmd->device); 1991 dev = cmd->device->hostdata; 1992 if (!dev) { 1993 cmd->result = DID_NO_CONNECT << 16; 1994 done(cmd); 1995 return 0; 1996 } 1997 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); 1998 1999 /* Need a lock as this is being allocated from the pool */ 2000 spin_lock_irqsave(&h->lock, flags); 2001 c = cmd_alloc(h); 2002 spin_unlock_irqrestore(&h->lock, flags); 2003 if (c == NULL) { /* trouble... */ 2004 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); 2005 return SCSI_MLQUEUE_HOST_BUSY; 2006 } 2007 2008 /* Fill in the command list header */ 2009 2010 cmd->scsi_done = done; /* save this for use by completion code */ 2011 2012 /* save c in case we have to abort it */ 2013 cmd->host_scribble = (unsigned char *) c; 2014 2015 c->cmd_type = CMD_SCSI; 2016 c->scsi_cmd = cmd; 2017 c->Header.ReplyQueue = 0; /* unused in simple mode */ 2018 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); 2019 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT); 2020 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT; 2021 2022 /* Fill in the request block... 
static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
	void (*done)(struct scsi_cmnd *))
{
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	unsigned char scsi3addr[8];
	struct CommandList *c;
	unsigned long flags;

	/* Get the ptr to our adapter structure out of cmd->host. */
	h = sdev_to_hba(cmd->device);
	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		done(cmd);
		return 0;
	}
	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));

	/* Need a lock as this is being allocated from the pool */
	spin_lock_irqsave(&h->lock, flags);
	c = cmd_alloc(h);
	spin_unlock_irqrestore(&h->lock, flags);
	if (c == NULL) {	/* trouble... */
		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* Fill in the command list header */

	cmd->scsi_done = done;    /* save this for use by completion code */

	/* save c in case we have to abort it */
	cmd->host_scribble = (unsigned char *) c;

	c->cmd_type = CMD_SCSI;
	c->scsi_cmd = cmd;
	c->Header.ReplyQueue = 0;  /* unused in simple mode */
	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
	c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
	c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;

	/* Fill in the request block... */

	c->Request.Timeout = 0;
	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
	c->Request.CDBLen = cmd->cmd_len;
	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
	c->Request.Type.Type = TYPE_CMD;
	c->Request.Type.Attribute = ATTR_SIMPLE;
	switch (cmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		c->Request.Type.Direction = XFER_WRITE;
		break;
	case DMA_FROM_DEVICE:
		c->Request.Type.Direction = XFER_READ;
		break;
	case DMA_NONE:
		c->Request.Type.Direction = XFER_NONE;
		break;
	case DMA_BIDIRECTIONAL:
		/* This can happen if a buggy application does a scsi passthru
		 * and sets both inlen and outlen to non-zero. ( see
		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
		 */
		c->Request.Type.Direction = XFER_RSVD;
		/* This is technically wrong, and hpsa controllers should
		 * reject it with CMD_INVALID, which is the most correct
		 * response, but non-fibre backends appear to let it
		 * slide by, and give the same results as if this field
		 * were set correctly.  Either way is acceptable for
		 * our purposes here.
		 */
		break;

	default:
		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
			cmd->sc_data_direction);
		BUG();
		break;
	}

	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
		cmd_free(h, c);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	enqueue_cmd_and_start_io(h, c);
	/* the cmd'll come back via intr handler in complete_scsi_command() */
	return 0;
}

static DEF_SCSI_QCMD(hpsa_scsi_queue_command)

static void hpsa_scan_start(struct Scsi_Host *sh)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;

	/* wait until any scan already in progress is finished. */
	while (1) {
		spin_lock_irqsave(&h->scan_lock, flags);
		if (h->scan_finished)
			break;
		spin_unlock_irqrestore(&h->scan_lock, flags);
		wait_event(h->scan_wait_queue, h->scan_finished);
		/* Note: We don't need to worry about a race between this
		 * thread and driver unload because the midlayer will
		 * have incremented the reference count, so unload won't
		 * happen if we're in here.
		 */
	}
	h->scan_finished = 0; /* mark scan as in progress */
	spin_unlock_irqrestore(&h->scan_lock, flags);

	hpsa_update_scsi_devices(h, h->scsi_host->host_no);

	spin_lock_irqsave(&h->scan_lock, flags);
	h->scan_finished = 1; /* mark scan as finished. */
	wake_up_all(&h->scan_wait_queue);
	spin_unlock_irqrestore(&h->scan_lock, flags);
}

static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;
	int finished;

	spin_lock_irqsave(&h->scan_lock, flags);
	finished = h->scan_finished;
	spin_unlock_irqrestore(&h->scan_lock, flags);
	return finished;
}

static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason)
{
	struct ctlr_info *h = sdev_to_hba(sdev);

	if (reason != SCSI_QDEPTH_DEFAULT)
		return -ENOTSUPP;

	if (qdepth < 1)
		qdepth = 1;
	else
		if (qdepth > h->nr_cmds)
			qdepth = h->nr_cmds;
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}

static void hpsa_unregister_scsi(struct ctlr_info *h)
{
	/* we are being forcibly unloaded, and may not refuse. */
	scsi_remove_host(h->scsi_host);
	scsi_host_put(h->scsi_host);
	h->scsi_host = NULL;
}

static int hpsa_register_scsi(struct ctlr_info *h)
{
	int rc;

	rc = hpsa_scsi_detect(h);
	if (rc != 0)
		dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
			" hpsa_scsi_detect(), rc is %d\n", rc);
	return rc;
}

static int wait_for_device_to_become_ready(struct ctlr_info *h,
	unsigned char lunaddr[])
{
	int rc = 0;
	int count = 0;
	int waittime = 1; /* seconds */
	struct CommandList *c;

	c = cmd_special_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"wait_for_device_to_become_ready.\n");
		return IO_ERROR;
	}

	/* Send test unit ready until device ready, or give up. */
	while (count < HPSA_TUR_RETRY_LIMIT) {

		/* Wait for a bit.  do this first, because if we send
		 * the TUR right away, the reset will just abort it.
		 */
		msleep(1000 * waittime);
		count++;

		/* Increase wait time with each try, up to a point. */
		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
			waittime = waittime * 2;

		/* Send the Test Unit Ready */
		fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
		hpsa_scsi_do_simple_cmd_core(h, c);
		/* no unmap needed here because no data xfer. */

		if (c->err_info->CommandStatus == CMD_SUCCESS)
			break;

		if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
			(c->err_info->SenseInfo[2] == NO_SENSE ||
			c->err_info->SenseInfo[2] == UNIT_ATTENTION))
			break;

		dev_warn(&h->pdev->dev, "waiting %d secs "
			"for device to become ready.\n", waittime);
		rc = 1; /* device not ready. */
	}

	if (rc)
		dev_warn(&h->pdev->dev, "giving up on device.\n");
	else
		dev_warn(&h->pdev->dev, "device is ready.\n");

	cmd_special_free(h, c);
	return rc;
}
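/*
 * Illustrative timing for the retry loop above (the cap value is
 * assumed; see the HPSA_MAX_WAIT_INTERVAL_SECS definition for the real
 * number): if the cap were 30 seconds, the sleeps between TURs would
 * run 1, 2, 4, 8, 16, 32, 32, ... seconds -- waittime doubles each
 * pass until it reaches the cap, and because the check happens before
 * the doubling, the final value may overshoot the cap by one doubling.
 * The loop gives up after HPSA_TUR_RETRY_LIMIT passes regardless.
 */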
2212 */ 2213 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) 2214 { 2215 int rc; 2216 struct ctlr_info *h; 2217 struct hpsa_scsi_dev_t *dev; 2218 2219 /* find the controller to which the command to be aborted was sent */ 2220 h = sdev_to_hba(scsicmd->device); 2221 if (h == NULL) /* paranoia */ 2222 return FAILED; 2223 dev = scsicmd->device->hostdata; 2224 if (!dev) { 2225 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " 2226 "device lookup failed.\n"); 2227 return FAILED; 2228 } 2229 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n", 2230 h->scsi_host->host_no, dev->bus, dev->target, dev->lun); 2231 /* send a reset to the SCSI LUN which the command was sent to */ 2232 rc = hpsa_send_reset(h, dev->scsi3addr); 2233 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) 2234 return SUCCESS; 2235 2236 dev_warn(&h->pdev->dev, "resetting device failed.\n"); 2237 return FAILED; 2238 } 2239 2240 /* 2241 * For operations that cannot sleep, a command block is allocated at init, 2242 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track 2243 * which ones are free or in use. Lock must be held when calling this. 2244 * cmd_free() is the complement. 2245 */ 2246 static struct CommandList *cmd_alloc(struct ctlr_info *h) 2247 { 2248 struct CommandList *c; 2249 int i; 2250 union u64bit temp64; 2251 dma_addr_t cmd_dma_handle, err_dma_handle; 2252 2253 do { 2254 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); 2255 if (i == h->nr_cmds) 2256 return NULL; 2257 } while (test_and_set_bit 2258 (i & (BITS_PER_LONG - 1), 2259 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); 2260 c = h->cmd_pool + i; 2261 memset(c, 0, sizeof(*c)); 2262 cmd_dma_handle = h->cmd_pool_dhandle 2263 + i * sizeof(*c); 2264 c->err_info = h->errinfo_pool + i; 2265 memset(c->err_info, 0, sizeof(*c->err_info)); 2266 err_dma_handle = h->errinfo_pool_dhandle 2267 + i * sizeof(*c->err_info); 2268 h->nr_allocs++; 2269 2270 c->cmdindex = i; 2271 2272 INIT_LIST_HEAD(&c->list); 2273 c->busaddr = (u32) cmd_dma_handle; 2274 temp64.val = (u64) err_dma_handle; 2275 c->ErrDesc.Addr.lower = temp64.val32.lower; 2276 c->ErrDesc.Addr.upper = temp64.val32.upper; 2277 c->ErrDesc.Len = sizeof(*c->err_info); 2278 2279 c->h = h; 2280 return c; 2281 } 2282 2283 /* For operations that can wait for kmalloc to possibly sleep, 2284 * this routine can be called. Lock need not be held to call 2285 * cmd_special_alloc. cmd_special_free() is the complement. 
2286 */ 2287 static struct CommandList *cmd_special_alloc(struct ctlr_info *h) 2288 { 2289 struct CommandList *c; 2290 union u64bit temp64; 2291 dma_addr_t cmd_dma_handle, err_dma_handle; 2292 2293 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle); 2294 if (c == NULL) 2295 return NULL; 2296 memset(c, 0, sizeof(*c)); 2297 2298 c->cmdindex = -1; 2299 2300 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info), 2301 &err_dma_handle); 2302 2303 if (c->err_info == NULL) { 2304 pci_free_consistent(h->pdev, 2305 sizeof(*c), c, cmd_dma_handle); 2306 return NULL; 2307 } 2308 memset(c->err_info, 0, sizeof(*c->err_info)); 2309 2310 INIT_LIST_HEAD(&c->list); 2311 c->busaddr = (u32) cmd_dma_handle; 2312 temp64.val = (u64) err_dma_handle; 2313 c->ErrDesc.Addr.lower = temp64.val32.lower; 2314 c->ErrDesc.Addr.upper = temp64.val32.upper; 2315 c->ErrDesc.Len = sizeof(*c->err_info); 2316 2317 c->h = h; 2318 return c; 2319 } 2320 2321 static void cmd_free(struct ctlr_info *h, struct CommandList *c) 2322 { 2323 int i; 2324 2325 i = c - h->cmd_pool; 2326 clear_bit(i & (BITS_PER_LONG - 1), 2327 h->cmd_pool_bits + (i / BITS_PER_LONG)); 2328 h->nr_frees++; 2329 } 2330 2331 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c) 2332 { 2333 union u64bit temp64; 2334 2335 temp64.val32.lower = c->ErrDesc.Addr.lower; 2336 temp64.val32.upper = c->ErrDesc.Addr.upper; 2337 pci_free_consistent(h->pdev, sizeof(*c->err_info), 2338 c->err_info, (dma_addr_t) temp64.val); 2339 pci_free_consistent(h->pdev, sizeof(*c), 2340 c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK)); 2341 } 2342 2343 #ifdef CONFIG_COMPAT 2344 2345 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) 2346 { 2347 IOCTL32_Command_struct __user *arg32 = 2348 (IOCTL32_Command_struct __user *) arg; 2349 IOCTL_Command_struct arg64; 2350 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); 2351 int err; 2352 u32 cp; 2353 2354 memset(&arg64, 0, sizeof(arg64)); 2355 err = 0; 2356 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 2357 sizeof(arg64.LUN_info)); 2358 err |= copy_from_user(&arg64.Request, &arg32->Request, 2359 sizeof(arg64.Request)); 2360 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 2361 sizeof(arg64.error_info)); 2362 err |= get_user(arg64.buf_size, &arg32->buf_size); 2363 err |= get_user(cp, &arg32->buf); 2364 arg64.buf = compat_ptr(cp); 2365 err |= copy_to_user(p, &arg64, sizeof(arg64)); 2366 2367 if (err) 2368 return -EFAULT; 2369 2370 err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p); 2371 if (err) 2372 return err; 2373 err |= copy_in_user(&arg32->error_info, &p->error_info, 2374 sizeof(arg32->error_info)); 2375 if (err) 2376 return -EFAULT; 2377 return err; 2378 } 2379 2380 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, 2381 int cmd, void *arg) 2382 { 2383 BIG_IOCTL32_Command_struct __user *arg32 = 2384 (BIG_IOCTL32_Command_struct __user *) arg; 2385 BIG_IOCTL_Command_struct arg64; 2386 BIG_IOCTL_Command_struct __user *p = 2387 compat_alloc_user_space(sizeof(arg64)); 2388 int err; 2389 u32 cp; 2390 2391 memset(&arg64, 0, sizeof(arg64)); 2392 err = 0; 2393 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, 2394 sizeof(arg64.LUN_info)); 2395 err |= copy_from_user(&arg64.Request, &arg32->Request, 2396 sizeof(arg64.Request)); 2397 err |= copy_from_user(&arg64.error_info, &arg32->error_info, 2398 sizeof(arg64.error_info)); 2399 err |= get_user(arg64.buf_size, &arg32->buf_size); 2400 err |= get_user(arg64.malloc_size, 
static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
	int cmd, void *arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			      sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			      sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			      sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			    sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}

static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return hpsa_ioctl(dev, cmd, arg);

	case CCISS_PASSTHRU32:
		return hpsa_ioctl32_passthru(dev, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return hpsa_ioctl32_big_passthru(dev, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
{
	struct hpsa_pci_info pciinfo;

	if (!argp)
		return -EINVAL;
	pciinfo.domain = pci_domain_nr(h->pdev->bus);
	pciinfo.bus = h->pdev->bus->number;
	pciinfo.dev_fn = h->pdev->devfn;
	pciinfo.board_id = h->board_id;
	if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;
	return 0;
}

static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
{
	DriverVer_type DriverVer;
	unsigned char vmaj, vmin, vsubmin;
	int rc;

	rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
		&vmaj, &vmin, &vsubmin);
	if (rc != 3) {
		dev_info(&h->pdev->dev, "driver version string '%s' "
			"unrecognized.", HPSA_DRIVER_VERSION);
		vmaj = 0;
		vmin = 0;
		vsubmin = 0;
	}
	DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
	if (!argp)
		return -EINVAL;
	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
		return -EFAULT;
	return 0;
}

static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	IOCTL_Command_struct iocommand;
	struct CommandList *c;
	char *buff = NULL;
	union u64bit temp64;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
		return -EFAULT;
	if ((iocommand.buf_size < 1) &&
	    (iocommand.Request.Type.Direction != XFER_NONE))
		return -EINVAL;
	if (iocommand.buf_size > 0) {
		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (buff == NULL)
			return -ENOMEM;
		if (iocommand.Request.Type.Direction == XFER_WRITE) {
			/* Copy the data into the buffer we created */
			if (copy_from_user(buff, iocommand.buf,
					   iocommand.buf_size)) {
				kfree(buff);
				return -EFAULT;
			}
		} else {
			memset(buff, 0, iocommand.buf_size);
		}
	}
	c = cmd_special_alloc(h);
	if (c == NULL) {
		kfree(buff);
		return -ENOMEM;
	}
	/* Fill in the command type */
	c->cmd_type = CMD_IOCTL_PEND;
	/* Fill in Command Header */
	c->Header.ReplyQueue = 0; /* unused in simple mode */
	if (iocommand.buf_size > 0) {	/* buffer to fill */
		c->Header.SGList = 1;
		c->Header.SGTotal = 1;
	} else { /* no buffers to fill */
		c->Header.SGList = 0;
		c->Header.SGTotal = 0;
	}
	memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
	/* use the kernel address of the cmd block for tag */
	c->Header.Tag.lower = c->busaddr;

	/* Fill in Request block */
	memcpy(&c->Request, &iocommand.Request,
		sizeof(c->Request));

	/* Fill in the scatter gather information */
	if (iocommand.buf_size > 0) {
		temp64.val = pci_map_single(h->pdev, buff,
			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
		c->SG[0].Addr.lower = temp64.val32.lower;
		c->SG[0].Addr.upper = temp64.val32.upper;
		c->SG[0].Len = iocommand.buf_size;
		c->SG[0].Ext = 0; /* we are not chaining */
	}
	hpsa_scsi_do_simple_cmd_core(h, c);
	hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);

	/* Copy the error information out */
	memcpy(&iocommand.error_info, c->err_info,
		sizeof(iocommand.error_info));
	if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
		kfree(buff);
		cmd_special_free(h, c);
		return -EFAULT;
	}
	if (iocommand.Request.Type.Direction == XFER_READ &&
		iocommand.buf_size > 0) {
		/* Copy the data out of the buffer we created */
		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
			kfree(buff);
			cmd_special_free(h, c);
			return -EFAULT;
		}
	}
	kfree(buff);
	cmd_special_free(h, c);
	return 0;
}

static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	BIG_IOCTL_Command_struct *ioc;
	struct CommandList *c;
	unsigned char **buff = NULL;
	int *buff_size = NULL;
	union u64bit temp64;
	BYTE sg_used = 0;
	int status = 0;
	int i;
	u32 left;
	u32 sz;
	BYTE __user *data_ptr;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc) {
		status = -ENOMEM;
		goto cleanup1;
	}
	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup1;
	}
	if ((ioc->buf_size < 1) &&
	    (ioc->Request.Type.Direction != XFER_NONE)) {
		status = -EINVAL;
		goto cleanup1;
	}
	/* Check kmalloc limits using all SGs */
	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
		status = -EINVAL;
		goto cleanup1;
	}
	if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
		status = -EINVAL;
		goto cleanup1;
	}
	buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
	if (!buff) {
		status = -ENOMEM;
		goto cleanup1;
	}
	buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
	if (!buff_size) {
		status = -ENOMEM;
		goto cleanup1;
	}
	left = ioc->buf_size;
	data_ptr = ioc->buf;
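	/*
	 * Worked example of the chunking loop below (the sizes are
	 * assumed for illustration): with ioc->buf_size == 20000 bytes
	 * and ioc->malloc_size == 8192, the loop builds three kernel
	 * buffers of 8192, 8192 and 3616 bytes, one per scatter-gather
	 * entry, copying user data in on XFER_WRITE and zero-filling
	 * otherwise.  The limit checks above guarantee the chunk count
	 * can never exceed MAXSGENTRIES.
	 */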
	while (left) {
		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
		buff_size[sg_used] = sz;
		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
		if (buff[sg_used] == NULL) {
			status = -ENOMEM;
			goto cleanup1;
		}
		if (ioc->Request.Type.Direction == XFER_WRITE) {
			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
				status = -EFAULT;
				goto cleanup1;
			}
		} else
			memset(buff[sg_used], 0, sz);
		left -= sz;
		data_ptr += sz;
		sg_used++;
	}
	c = cmd_special_alloc(h);
	if (c == NULL) {
		status = -ENOMEM;
		goto cleanup1;
	}
	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	c->Header.SGList = c->Header.SGTotal = sg_used;
	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
	c->Header.Tag.lower = c->busaddr;
	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
	if (ioc->buf_size > 0) {
		int i;
		for (i = 0; i < sg_used; i++) {
			temp64.val = pci_map_single(h->pdev, buff[i],
				    buff_size[i], PCI_DMA_BIDIRECTIONAL);
			c->SG[i].Addr.lower = temp64.val32.lower;
			c->SG[i].Addr.upper = temp64.val32.upper;
			c->SG[i].Len = buff_size[i];
			/* we are not chaining */
			c->SG[i].Ext = 0;
		}
	}
	hpsa_scsi_do_simple_cmd_core(h, c);
	if (sg_used)
		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	/* Copy the error information out */
	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
		cmd_special_free(h, c);
		status = -EFAULT;
		goto cleanup1;
	}
	if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
		/* Copy the data out of the buffer we created */
		BYTE __user *ptr = ioc->buf;
		for (i = 0; i < sg_used; i++) {
			if (copy_to_user(ptr, buff[i], buff_size[i])) {
				cmd_special_free(h, c);
				status = -EFAULT;
				goto cleanup1;
			}
			ptr += buff_size[i];
		}
	}
	cmd_special_free(h, c);
	status = 0;
cleanup1:
	if (buff) {
		for (i = 0; i < sg_used; i++)
			kfree(buff[i]);
		kfree(buff);
	}
	kfree(buff_size);
	kfree(ioc);
	return status;
}

static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
		c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
		(void) check_for_unit_attention(h, c);
}

/*
 * ioctl
 */
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
{
	struct ctlr_info *h;
	void __user *argp = (void __user *)arg;

	h = sdev_to_hba(dev);

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		hpsa_scan_start(h->scsi_host);
		return 0;
	case CCISS_GETPCIINFO:
		return hpsa_getpciinfo_ioctl(h, argp);
	case CCISS_GETDRIVVER:
		return hpsa_getdrivver_ioctl(h, argp);
	case CCISS_PASSTHRU:
		return hpsa_passthru_ioctl(h, argp);
	case CCISS_BIG_PASSTHRU:
		return hpsa_big_passthru_ioctl(h, argp);
	default:
		return -ENOTTY;
	}
}
static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
	int cmd_type)
{
	int pci_dir = XFER_NONE;

	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	if (buff != NULL && size > 0) {
		c->Header.SGList = 1;
		c->Header.SGTotal = 1;
	} else {
		c->Header.SGList = 0;
		c->Header.SGTotal = 0;
	}
	c->Header.Tag.lower = c->busaddr;
	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);

	c->Request.Type.Type = cmd_type;
	if (cmd_type == TYPE_CMD) {
		switch (cmd) {
		case HPSA_INQUIRY:
			/* are we trying to read a vital product page */
			if (page_code != 0) {
				c->Request.CDB[1] = 0x01;
				c->Request.CDB[2] = page_code;
			}
			c->Request.CDBLen = 6;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_INQUIRY;
			c->Request.CDB[4] = size & 0xFF;
			break;
		case HPSA_REPORT_LOG:
		case HPSA_REPORT_PHYS:
			/* Talking to controller so it's a physical command
			 * (mode = 00, target = 0).  Nothing to write.
			 */
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case HPSA_CACHE_FLUSH:
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
			break;
		case TEST_UNIT_READY:
			c->Request.CDBLen = 6;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_NONE;
			c->Request.Timeout = 0;
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
			BUG();
			return;
		}
	} else if (cmd_type == TYPE_MSG) {
		switch (cmd) {

		case HPSA_DEVICE_RESET_MSG:
			c->Request.CDBLen = 16;
			c->Request.Type.Type = 1; /* It is a MSG not a CMD */
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_NONE;
			c->Request.Timeout = 0; /* Don't time out */
			c->Request.CDB[0] = 0x01; /* RESET_MSG is 0x01 */
			c->Request.CDB[1] = 0x03; /* Reset target above */
			/* If bytes 4-7 are zero, it means reset the */
			/* LunID device */
			c->Request.CDB[4] = 0x00;
			c->Request.CDB[5] = 0x00;
			c->Request.CDB[6] = 0x00;
			c->Request.CDB[7] = 0x00;
			break;

		default:
			dev_warn(&h->pdev->dev, "unknown message type %d\n",
				cmd);
			BUG();
		}
	} else {
		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
		BUG();
	}

	switch (c->Request.Type.Direction) {
	case XFER_READ:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case XFER_WRITE:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case XFER_NONE:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
	}

	hpsa_map_one(h->pdev, c, buff, size, pci_dir);

	return;
}
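/*
 * Minimal usage sketch for fill_cmd() (illustrative only; "c" is
 * assumed to come from cmd_special_alloc() and "lunaddr" to be a valid
 * 8-byte scsi3addr).  This mirrors what
 * wait_for_device_to_become_ready() does above:
 *
 *	fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
 *	hpsa_scsi_do_simple_cmd_core(h, c);
 *	if (c->err_info->CommandStatus == CMD_SUCCESS)
 *		... the device answered the TUR ...
 *
 * Since buff is NULL for TEST_UNIT_READY, no data buffer is attached
 * and there is nothing to unmap afterwards.
 */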
/*
 * Map (physical) PCI mem into (virtual) kernel space
 */
static void __iomem *remap_pci_mem(ulong base, ulong size)
{
	ulong page_base = ((ulong) base) & PAGE_MASK;
	ulong page_offs = ((ulong) base) - page_base;
	void __iomem *page_remapped = ioremap(page_base, page_offs + size);

	return page_remapped ? (page_remapped + page_offs) : NULL;
}

/* Takes cmds off the submission queue and sends them to the hardware,
 * then puts them on the queue of cmds waiting for completion.
 */
static void start_io(struct ctlr_info *h)
{
	struct CommandList *c;

	while (!list_empty(&h->reqQ)) {
		c = list_entry(h->reqQ.next, struct CommandList, list);
		/* can't do anything if fifo is full */
		if ((h->access.fifo_full(h))) {
			dev_warn(&h->pdev->dev, "fifo full\n");
			break;
		}

		/* Get the first entry from the Request Q */
		removeQ(c);
		h->Qdepth--;

		/* Tell the controller to execute the command */
		h->access.submit_command(h, c);

		/* Put job onto the completed Q */
		addQ(&h->cmpQ, c);
	}
}

static inline unsigned long get_next_completion(struct ctlr_info *h)
{
	return h->access.command_completed(h);
}

static inline bool interrupt_pending(struct ctlr_info *h)
{
	return h->access.intr_pending(h);
}

static inline long interrupt_not_for_us(struct ctlr_info *h)
{
	return (h->access.intr_pending(h) == 0) ||
		(h->interrupts_enabled == 0);
}

static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
	u32 raw_tag)
{
	if (unlikely(tag_index >= h->nr_cmds)) {
		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
		return 1;
	}
	return 0;
}

static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
{
	removeQ(c);
	if (likely(c->cmd_type == CMD_SCSI))
		complete_scsi_command(c);
	else if (c->cmd_type == CMD_IOCTL_PEND)
		complete(c->waiting);
}

static inline u32 hpsa_tag_contains_index(u32 tag)
{
	return tag & DIRECT_LOOKUP_BIT;
}

static inline u32 hpsa_tag_to_index(u32 tag)
{
	return tag >> DIRECT_LOOKUP_SHIFT;
}

static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
{
#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
#define HPSA_SIMPLE_ERROR_BITS 0x03
	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return tag & ~HPSA_SIMPLE_ERROR_BITS;
	return tag & ~HPSA_PERF_ERROR_BITS;
}

/* process completion of an indexed ("direct lookup") command */
static inline u32 process_indexed_cmd(struct ctlr_info *h,
	u32 raw_tag)
{
	u32 tag_index;
	struct CommandList *c;

	tag_index = hpsa_tag_to_index(raw_tag);
	if (bad_tag(h, tag_index, raw_tag))
		return next_command(h);
	c = h->cmd_pool + tag_index;
	finish_cmd(c, raw_tag);
	return next_command(h);
}

/* process completion of a non-indexed command */
static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
	u32 raw_tag)
{
	u32 tag;
	struct CommandList *c = NULL;

	tag = hpsa_tag_discard_error_bits(h, raw_tag);
	list_for_each_entry(c, &h->cmpQ, list) {
		if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
			finish_cmd(c, raw_tag);
			return next_command(h);
		}
	}
	bad_tag(h, h->nr_cmds + 1, raw_tag);
	return next_command(h);
}
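/*
 * Tag layout sketch for the helpers above (the shift and bit values
 * are assumed purely for illustration; see hpsa.h for the real
 * definitions): queue_command encodes the pool index as
 * (cmdindex << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT, so if
 * DIRECT_LOOKUP_SHIFT were 4 and DIRECT_LOOKUP_BIT were 0x04,
 * cmdindex 9 would come back as tag 0x94: hpsa_tag_contains_index()
 * sees the direct-lookup bit, hpsa_tag_to_index() recovers 9 by
 * shifting right, and hpsa_tag_discard_error_bits() masks off the low
 * bits, which is where the hardware reports per-command error status.
 */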
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id)
{
	struct ctlr_info *h = dev_id;
	unsigned long flags;
	u32 raw_tag;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	spin_lock_irqsave(&h->lock, flags);
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h);
		while (raw_tag != FIFO_EMPTY) {
			if (hpsa_tag_contains_index(raw_tag))
				raw_tag = process_indexed_cmd(h, raw_tag);
			else
				raw_tag = process_nonindexed_cmd(h, raw_tag);
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
{
	struct ctlr_info *h = dev_id;
	unsigned long flags;
	u32 raw_tag;

	spin_lock_irqsave(&h->lock, flags);
	raw_tag = get_next_completion(h);
	while (raw_tag != FIFO_EMPTY) {
		if (hpsa_tag_contains_index(raw_tag))
			raw_tag = process_indexed_cmd(h, raw_tag);
		else
			raw_tag = process_nonindexed_cmd(h, raw_tag);
	}
	spin_unlock_irqrestore(&h->lock, flags);
	return IRQ_HANDLED;
}

/* Send a message CDB to the firmware.  Careful, this only works
 * in simple mode, not performant mode due to the tag lookup.
 * We only ever use this immediately after a controller reset.
 */
static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
	unsigned char type)
{
	struct Command {
		struct CommandListHeader CommandHeader;
		struct RequestBlock Request;
		struct ErrDescriptor ErrorDescriptor;
	};
	struct Command *cmd;
	static const size_t cmd_sz = sizeof(*cmd) +
		sizeof(cmd->ErrorDescriptor);
	dma_addr_t paddr64;
	uint32_t paddr32, tag;
	void __iomem *vaddr;
	int i, err;

	vaddr = pci_ioremap_bar(pdev, 0);
	if (vaddr == NULL)
		return -ENOMEM;

	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
	 * CCISS commands, so they must be allocated from the lower 4GiB of
	 * memory.
	 */
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		iounmap(vaddr);
		return -ENOMEM;
	}

	cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
	if (cmd == NULL) {
		iounmap(vaddr);
		return -ENOMEM;
	}

	/* This must fit, because of the 32-bit consistent DMA mask.  Also,
	 * although there's no guarantee, we assume that the address is at
	 * least 4-byte aligned (most likely, it's page-aligned).
	 */
	paddr32 = paddr64;

	cmd->CommandHeader.ReplyQueue = 0;
	cmd->CommandHeader.SGList = 0;
	cmd->CommandHeader.SGTotal = 0;
	cmd->CommandHeader.Tag.lower = paddr32;
	cmd->CommandHeader.Tag.upper = 0;
	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);

	cmd->Request.CDBLen = 16;
	cmd->Request.Type.Type = TYPE_MSG;
	cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
	cmd->Request.Type.Direction = XFER_NONE;
	cmd->Request.Timeout = 0; /* Don't time out */
	cmd->Request.CDB[0] = opcode;
	cmd->Request.CDB[1] = type;
	memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
	cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
	cmd->ErrorDescriptor.Addr.upper = 0;
	cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);

	writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);

	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
		if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
			break;
		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
	}

	iounmap(vaddr);
3107 */ 3108 if (i == HPSA_MSG_SEND_RETRY_LIMIT) { 3109 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n", 3110 opcode, type); 3111 return -ETIMEDOUT; 3112 } 3113 3114 pci_free_consistent(pdev, cmd_sz, cmd, paddr64); 3115 3116 if (tag & HPSA_ERROR_BIT) { 3117 dev_err(&pdev->dev, "controller message %02x:%02x failed\n", 3118 opcode, type); 3119 return -EIO; 3120 } 3121 3122 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", 3123 opcode, type); 3124 return 0; 3125 } 3126 3127 #define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0) 3128 #define hpsa_noop(p) hpsa_message(p, 3, 0) 3129 3130 static int hpsa_controller_hard_reset(struct pci_dev *pdev, 3131 void * __iomem vaddr, bool use_doorbell) 3132 { 3133 u16 pmcsr; 3134 int pos; 3135 3136 if (use_doorbell) { 3137 /* For everything after the P600, the PCI power state method 3138 * of resetting the controller doesn't work, so we have this 3139 * other way using the doorbell register. 3140 */ 3141 dev_info(&pdev->dev, "using doorbell to reset controller\n"); 3142 writel(DOORBELL_CTLR_RESET, vaddr + SA5_DOORBELL); 3143 msleep(1000); 3144 } else { /* Try to do it the PCI power state way */ 3145 3146 /* Quoting from the Open CISS Specification: "The Power 3147 * Management Control/Status Register (CSR) controls the power 3148 * state of the device. The normal operating state is D0, 3149 * CSR=00h. The software off state is D3, CSR=03h. To reset 3150 * the controller, place the interface device in D3 then to D0, 3151 * this causes a secondary PCI reset which will reset the 3152 * controller." */ 3153 3154 pos = pci_find_capability(pdev, PCI_CAP_ID_PM); 3155 if (pos == 0) { 3156 dev_err(&pdev->dev, 3157 "hpsa_reset_controller: " 3158 "PCI PM not supported\n"); 3159 return -ENODEV; 3160 } 3161 dev_info(&pdev->dev, "using PCI PM to reset controller\n"); 3162 /* enter the D3hot power management state */ 3163 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); 3164 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 3165 pmcsr |= PCI_D3hot; 3166 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 3167 3168 msleep(500); 3169 3170 /* enter the D0 power management state */ 3171 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 3172 pmcsr |= PCI_D0; 3173 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 3174 3175 msleep(500); 3176 } 3177 return 0; 3178 } 3179 3180 static __devinit void init_driver_version(char *driver_version, int len) 3181 { 3182 memset(driver_version, 0, len); 3183 strncpy(driver_version, "hpsa " HPSA_DRIVER_VERSION, len - 1); 3184 } 3185 3186 static __devinit int write_driver_ver_to_cfgtable( 3187 struct CfgTable __iomem *cfgtable) 3188 { 3189 char *driver_version; 3190 int i, size = sizeof(cfgtable->driver_version); 3191 3192 driver_version = kmalloc(size, GFP_KERNEL); 3193 if (!driver_version) 3194 return -ENOMEM; 3195 3196 init_driver_version(driver_version, size); 3197 for (i = 0; i < size; i++) 3198 writeb(driver_version[i], &cfgtable->driver_version[i]); 3199 kfree(driver_version); 3200 return 0; 3201 } 3202 3203 static __devinit void read_driver_ver_from_cfgtable( 3204 struct CfgTable __iomem *cfgtable, unsigned char *driver_ver) 3205 { 3206 int i; 3207 3208 for (i = 0; i < sizeof(cfgtable->driver_version); i++) 3209 driver_ver[i] = readb(&cfgtable->driver_version[i]); 3210 } 3211 3212 static __devinit int controller_reset_failed( 3213 struct CfgTable __iomem *cfgtable) 3214 { 3215 3216 char *driver_ver, *old_driver_ver; 3217 int rc, size = sizeof(cfgtable->driver_version); 3218 3219 old_driver_ver = kmalloc(2 * size, 
static __devinit int controller_reset_failed(
	struct CfgTable __iomem *cfgtable)
{
	char *driver_ver, *old_driver_ver;
	int rc, size = sizeof(cfgtable->driver_version);

	old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
	if (!old_driver_ver)
		return -ENOMEM;
	driver_ver = old_driver_ver + size;

	/* After a reset, the 32 bytes of "driver version" in the cfgtable
	 * should have been changed, otherwise we know the reset failed.
	 */
	init_driver_version(old_driver_ver, size);
	read_driver_ver_from_cfgtable(cfgtable, driver_ver);
	rc = !memcmp(driver_ver, old_driver_ver, size);
	kfree(old_driver_ver);
	return rc;
}

/* This does a hard reset of the controller using PCI power management
 * states or the doorbell register.
 */
static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	void __iomem *vaddr;
	unsigned long paddr;
	u32 misc_fw_support;
	int rc;
	struct CfgTable __iomem *cfgtable;
	bool use_doorbell;
	u32 board_id;
	u16 command_register;

	/* For controllers as old as the P600, this is very nearly
	 * the same thing as
	 *
	 * pci_save_state(pci_dev);
	 * pci_set_power_state(pci_dev, PCI_D3hot);
	 * pci_set_power_state(pci_dev, PCI_D0);
	 * pci_restore_state(pci_dev);
	 *
	 * For controllers newer than the P600, the pci power state
	 * method of resetting doesn't work so we have another way
	 * using the doorbell register.
	 */

	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	rc = hpsa_lookup_board_id(pdev, &board_id);
	if (rc < 0) {
		dev_warn(&pdev->dev, "Not resetting device.\n");
		return -ENODEV;
	}
	if (board_id == 0x409C0E11 || board_id == 0x409D0E11)
		return -ENOTSUPP;

	/* Save the PCI command register */
	pci_read_config_word(pdev, 4, &command_register);
	/* Turn the board off.  This is so that later pci_restore_state()
	 * won't turn the board on before the rest of config space is ready.
	 */
	pci_disable_device(pdev);
	pci_save_state(pdev);

	/* find the first memory BAR, so we can find the cfg table */
	rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
	if (rc)
		return rc;
	vaddr = remap_pci_mem(paddr, 0x250);
	if (!vaddr)
		return -ENOMEM;

	/* find cfgtable in order to check if reset via doorbell is supported */
	rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
		&cfg_base_addr_index, &cfg_offset);
	if (rc)
		goto unmap_vaddr;
	cfgtable = remap_pci_mem(pci_resource_start(pdev,
		cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
	if (!cfgtable) {
		rc = -ENOMEM;
		goto unmap_vaddr;
	}
	rc = write_driver_ver_to_cfgtable(cfgtable);
	if (rc)
		goto unmap_vaddr;
	/* If reset via doorbell register is supported, use that. */
	misc_fw_support = readl(&cfgtable->misc_fw_support);
	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;

	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
	if (rc)
		goto unmap_cfgtable;

	pci_restore_state(pdev);
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "failed to enable device.\n");
		goto unmap_cfgtable;
	}
	pci_write_config_word(pdev, 4, command_register);

	/* Some devices (notably the HP Smart Array 5i Controller)
	   need a little pause here */
	msleep(HPSA_POST_RESET_PAUSE_MSECS);

	/* Wait for board to become not ready, then ready. */
	dev_info(&pdev->dev, "Waiting for board to reset.\n");
	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
	if (rc)
		dev_warn(&pdev->dev,
			"failed waiting for board to reset\n");
	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
	if (rc) {
		dev_warn(&pdev->dev,
			"failed waiting for board to become ready\n");
		goto unmap_cfgtable;
	}

	rc = controller_reset_failed(cfgtable);
	if (rc < 0)
		goto unmap_cfgtable;
	if (rc) {
		dev_warn(&pdev->dev, "Unable to successfully reset "
			"controller.  Ignoring controller.\n");
		rc = -ENODEV;
	} else {
		dev_info(&pdev->dev, "board ready.\n");
	}

unmap_cfgtable:
	iounmap(cfgtable);

unmap_vaddr:
	iounmap(vaddr);
	return rc;
}

/*
 * We cannot read the structure directly, for portability we must use
 * the io functions.
 * This is for debug only.
 */
static void print_cfg_table(struct device *dev, struct CfgTable *tb)
{
#ifdef HPSA_DEBUG
	int i;
	char temp_name[17];

	dev_info(dev, "Controller Configuration information\n");
	dev_info(dev, "------------------------------------\n");
	for (i = 0; i < 4; i++)
		temp_name[i] = readb(&(tb->Signature[i]));
	temp_name[4] = '\0';
	dev_info(dev, "   Signature = %s\n", temp_name);
	dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
	dev_info(dev, "   Transport methods supported = 0x%x\n",
		readl(&(tb->TransportSupport)));
	dev_info(dev, "   Transport methods active = 0x%x\n",
		readl(&(tb->TransportActive)));
	dev_info(dev, "   Requested transport Method = 0x%x\n",
		readl(&(tb->HostWrite.TransportRequest)));
	dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
		readl(&(tb->HostWrite.CoalIntDelay)));
	dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
		readl(&(tb->HostWrite.CoalIntCount)));
	dev_info(dev, "   Max outstanding commands = %d\n",
		readl(&(tb->CmdsOutMax)));
	dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
	for (i = 0; i < 16; i++)
		temp_name[i] = readb(&(tb->ServerName[i]));
	temp_name[16] = '\0';
	dev_info(dev, "   Server Name = %s\n", temp_name);
	dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
		readl(&(tb->HeartBeat)));
#endif				/* HPSA_DEBUG */
}
static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
	int i, offset, mem_type, bar_type;

	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
		return 0;
	offset = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
			offset += 4;
		else {
			mem_type = pci_resource_flags(pdev, i) &
			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
			switch (mem_type) {
			case PCI_BASE_ADDRESS_MEM_TYPE_32:
			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
				offset += 4;	/* 32 bit */
				break;
			case PCI_BASE_ADDRESS_MEM_TYPE_64:
				offset += 8;
				break;
			default:	/* reserved in PCI 2.2 */
				dev_warn(&pdev->dev,
					 "base address is invalid\n");
				return -1;
			}
		}
		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
			return i + 1;
	}
	return -1;
}

/* If MSI/MSI-X is supported by the kernel we will try to enable it on
 * controllers that are capable.  If not, we use IO-APIC mode.
 */
static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
{
#ifdef CONFIG_PCI_MSI
	int err;
	struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
	{0, 2}, {0, 3}
	};

	/* Some boards advertise MSI but don't really support it */
	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
	    (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
		goto default_int_mode;
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
		dev_info(&h->pdev->dev, "MSIX\n");
		err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4);
		if (!err) {
			h->intr[0] = hpsa_msix_entries[0].vector;
			h->intr[1] = hpsa_msix_entries[1].vector;
			h->intr[2] = hpsa_msix_entries[2].vector;
			h->intr[3] = hpsa_msix_entries[3].vector;
			h->msix_vector = 1;
			return;
		}
		if (err > 0) {
			dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
				"available\n", err);
			goto default_int_mode;
		} else {
			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
				err);
			goto default_int_mode;
		}
	}
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
		dev_info(&h->pdev->dev, "MSI\n");
		if (!pci_enable_msi(h->pdev))
			h->msi_vector = 1;
		else
			dev_warn(&h->pdev->dev, "MSI init failed\n");
	}
default_int_mode:
#endif				/* CONFIG_PCI_MSI */
	/* if we get here we're going to use the default interrupt mode */
	h->intr[h->intr_mode] = h->pdev->irq;
}

static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
{
	int i;
	u32 subsystem_vendor_id, subsystem_device_id;

	subsystem_vendor_id = pdev->subsystem_vendor;
	subsystem_device_id = pdev->subsystem_device;
	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
		    subsystem_vendor_id;

	for (i = 0; i < ARRAY_SIZE(products); i++)
		if (*board_id == products[i].board_id)
			return i;

	if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
		subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
		!hpsa_allow_any) {
		dev_warn(&pdev->dev, "unrecognized board ID: "
			"0x%08x, ignoring.\n", *board_id);
		return -ENODEV;
	}
	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
}

static inline bool hpsa_board_disabled(struct pci_dev *pdev)
{
	u16 command;

	(void) pci_read_config_word(pdev, PCI_COMMAND, &command);
	return ((command & PCI_COMMAND_MEMORY) == 0);
}
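/*
 * Worked example for hpsa_lookup_board_id() above (illustrative
 * values): a board reporting subsystem device 0x3241 and subsystem
 * vendor 0x103C yields *board_id == (0x3241 << 16) | 0x103C ==
 * 0x3241103C, which is then matched against the products[] table.
 * IDs with no table entry are accepted via the generic last entry only
 * for HP/Compaq subsystem vendors and only when the hpsa_allow_any
 * module parameter is set; anything else is rejected with -ENODEV.
 */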
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar)
{
	int i;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			/* addressing mode bits already removed */
			*memory_bar = pci_resource_start(pdev, i);
			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
				*memory_bar);
			return 0;
		}
	dev_warn(&pdev->dev, "no memory BAR found\n");
	return -ENODEV;
}

static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
	void __iomem *vaddr, int wait_for_ready)
{
	int i, iterations;
	u32 scratchpad;

	if (wait_for_ready)
		iterations = HPSA_BOARD_READY_ITERATIONS;
	else
		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;

	for (i = 0; i < iterations; i++) {
		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
		if (wait_for_ready) {
			if (scratchpad == HPSA_FIRMWARE_READY)
				return 0;
		} else {
			if (scratchpad != HPSA_FIRMWARE_READY)
				return 0;
		}
		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	dev_warn(&pdev->dev, "board not ready, timed out.\n");
	return -ENODEV;
}

static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset)
{
	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
	*cfg_base_addr &= (u32) 0x0000ffff;
	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
	if (*cfg_base_addr_index == -1) {
		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
		return -ENODEV;
	}
	return 0;
}

static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	u32 trans_offset;
	int rc;

	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
		&cfg_base_addr_index, &cfg_offset);
	if (rc)
		return rc;
	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
		cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
	if (!h->cfgtable)
		return -ENOMEM;
	rc = write_driver_ver_to_cfgtable(h->cfgtable);
	if (rc)
		return rc;
	/* Find performant mode table. */
	trans_offset = readl(&h->cfgtable->TransMethodOffset);
	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
				cfg_base_addr_index)+cfg_offset+trans_offset,
				sizeof(*h->transtable));
	if (!h->transtable)
		return -ENOMEM;
	return 0;
}

static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));

	/* Limit commands in memory limited kdump scenario. */
	if (reset_devices && h->max_commands > 32)
		h->max_commands = 32;

	if (h->max_commands < 16) {
		dev_warn(&h->pdev->dev, "Controller reports "
			"max supported commands of %d, an obvious lie. "
			"Using 16.  Ensure that firmware is up to date.\n",
			h->max_commands);
		h->max_commands = 16;
	}
}
3624 */ 3625 static void __devinit hpsa_find_board_params(struct ctlr_info *h) 3626 { 3627 hpsa_get_max_perf_mode_cmds(h); 3628 h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */ 3629 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); 3630 /* 3631 * Limit in-command s/g elements to 32 save dma'able memory. 3632 * Howvever spec says if 0, use 31 3633 */ 3634 h->max_cmd_sg_entries = 31; 3635 if (h->maxsgentries > 512) { 3636 h->max_cmd_sg_entries = 32; 3637 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1; 3638 h->maxsgentries--; /* save one for chain pointer */ 3639 } else { 3640 h->maxsgentries = 31; /* default to traditional values */ 3641 h->chainsize = 0; 3642 } 3643 } 3644 3645 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) 3646 { 3647 if ((readb(&h->cfgtable->Signature[0]) != 'C') || 3648 (readb(&h->cfgtable->Signature[1]) != 'I') || 3649 (readb(&h->cfgtable->Signature[2]) != 'S') || 3650 (readb(&h->cfgtable->Signature[3]) != 'S')) { 3651 dev_warn(&h->pdev->dev, "not a valid CISS config table\n"); 3652 return false; 3653 } 3654 return true; 3655 } 3656 3657 /* Need to enable prefetch in the SCSI core for 6400 in x86 */ 3658 static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h) 3659 { 3660 #ifdef CONFIG_X86 3661 u32 prefetch; 3662 3663 prefetch = readl(&(h->cfgtable->SCSI_Prefetch)); 3664 prefetch |= 0x100; 3665 writel(prefetch, &(h->cfgtable->SCSI_Prefetch)); 3666 #endif 3667 } 3668 3669 /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result 3670 * in a prefetch beyond physical memory. 3671 */ 3672 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h) 3673 { 3674 u32 dma_prefetch; 3675 3676 if (h->board_id != 0x3225103C) 3677 return; 3678 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); 3679 dma_prefetch |= 0x8000; 3680 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); 3681 } 3682 3683 static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h) 3684 { 3685 int i; 3686 u32 doorbell_value; 3687 unsigned long flags; 3688 3689 /* under certain very rare conditions, this can take awhile. 3690 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right 3691 * as we enter this code.) 
static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
	if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
	    (readb(&h->cfgtable->Signature[1]) != 'I') ||
	    (readb(&h->cfgtable->Signature[2]) != 'S') ||
	    (readb(&h->cfgtable->Signature[3]) != 'S')) {
		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
		return false;
	}
	return true;
}

/* Need to enable prefetch in the SCSI core for 6400 in x86 */
static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
{
#ifdef CONFIG_X86
	u32 prefetch;

	prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
	prefetch |= 0x100;
	writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
#endif
}

/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
	u32 dma_prefetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}

static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* Under certain very rare conditions, this can take a while.
	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
	 * as we enter this code.)
	 */
	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & CFGTBL_ChangeReq))
			break;
		/* delay and try again */
		usleep_range(10000, 20000);
	}
}

static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
{
	u32 trans_support;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & SIMPLE_MODE))
		return -ENOTSUPP;

	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
	/* Update the field, and then ring the doorbell */
	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	print_cfg_table(&h->pdev->dev, h->cfgtable);
	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
		dev_warn(&h->pdev->dev,
			"unable to get board into simple mode\n");
		return -ENODEV;
	}
	h->transMethod = CFGTBL_Trans_Simple;
	return 0;
}
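
/*
 * For reference, the transport handshake used above (and again when
 * entering performant mode) is: write the requested mode to
 * HostWrite.TransportRequest, ring the CFGTBL_ChangeReq doorbell,
 * poll the doorbell register until the controller clears
 * CFGTBL_ChangeReq, then read TransportActive back to verify that the
 * controller really switched modes.
 */
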
static int __devinit hpsa_pci_init(struct ctlr_info *h)
{
	int prod_index, err;

	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
	if (prod_index < 0)
		return -ENODEV;
	h->product_name = products[prod_index].product_name;
	h->access = *(products[prod_index].access);

	if (hpsa_board_disabled(h->pdev)) {
		dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
		return -ENODEV;
	}
	err = pci_enable_device(h->pdev);
	if (err) {
		dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(h->pdev, "hpsa");
	if (err) {
		dev_err(&h->pdev->dev,
			"cannot obtain PCI resources, aborting\n");
		return err;
	}
	hpsa_interrupt_mode(h);
	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
	if (err)
		goto err_out_free_res;
	h->vaddr = remap_pci_mem(h->paddr, 0x250);
	if (!h->vaddr) {
		err = -ENOMEM;
		goto err_out_free_res;
	}
	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (err)
		goto err_out_free_res;
	err = hpsa_find_cfgtables(h);
	if (err)
		goto err_out_free_res;
	hpsa_find_board_params(h);

	if (!hpsa_CISS_signature_present(h)) {
		err = -ENODEV;
		goto err_out_free_res;
	}
	hpsa_enable_scsi_prefetch(h);
	hpsa_p600_dma_prefetch_quirk(h);
	err = hpsa_enter_simple_mode(h);
	if (err)
		goto err_out_free_res;
	return 0;

err_out_free_res:
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	if (h->vaddr)
		iounmap(h->vaddr);
	/*
	 * Deliberately omit pci_disable_device(): it does something nasty to
	 * Smart Array controllers that pci_enable_device does not undo
	 */
	pci_release_regions(h->pdev);
	return err;
}

static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
{
	int rc;

#define HBA_INQUIRY_BYTE_COUNT 64
	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
	if (!h->hba_inquiry_data)
		return;
	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
	if (rc != 0) {
		kfree(h->hba_inquiry_data);
		h->hba_inquiry_data = NULL;
	}
}

static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
{
	int rc, i;

	if (!reset_devices)
		return 0;

	/* Reset the controller with a PCI power-cycle or via doorbell */
	rc = hpsa_kdump_hard_reset_controller(pdev);

	/* -ENOTSUPP here means we cannot reset the controller
	 * but it's already (and still) up and running in
	 * "performant mode".  Or, it might be 640x, which can't reset
	 * due to concerns about shared bbwc between 6402/6404 pair.
	 */
	if (rc == -ENOTSUPP)
		return 0; /* just try to do the kdump anyhow. */
	if (rc)
		return -ENODEV;

	/* Now try to get the controller to respond to a no-op */
	dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			break;
		else
			dev_warn(&pdev->dev, "no-op failed%s\n",
				(i < HPSA_POST_RESET_NOOP_RETRIES - 1 ?
					"; re-trying" : ""));
	}
	return 0;
}

static __devinit int hpsa_allocate_cmd_pool(struct ctlr_info *h)
{
	h->cmd_pool_bits = kzalloc(
		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
		sizeof(unsigned long), GFP_KERNEL);
	h->cmd_pool = pci_alloc_consistent(h->pdev,
		h->nr_cmds * sizeof(*h->cmd_pool),
		&(h->cmd_pool_dhandle));
	h->errinfo_pool = pci_alloc_consistent(h->pdev,
		h->nr_cmds * sizeof(*h->errinfo_pool),
		&(h->errinfo_pool_dhandle));
	if ((h->cmd_pool_bits == NULL)
	    || (h->cmd_pool == NULL)
	    || (h->errinfo_pool == NULL)) {
		dev_err(&h->pdev->dev, "out of memory in %s\n", __func__);
		return -ENOMEM;
	}
	return 0;
}

static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
	kfree(h->cmd_pool_bits);
	if (h->cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(struct CommandList),
			h->cmd_pool, h->cmd_pool_dhandle);
	if (h->errinfo_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(struct ErrorInfo),
			h->errinfo_pool,
			h->errinfo_pool_dhandle);
}

static int hpsa_request_irq(struct ctlr_info *h,
	irqreturn_t (*msixhandler)(int, void *),
	irqreturn_t (*intxhandler)(int, void *))
{
	int rc;

	if (h->msix_vector || h->msi_vector)
		rc = request_irq(h->intr[h->intr_mode], msixhandler,
				IRQF_DISABLED, h->devname, h);
	else
		rc = request_irq(h->intr[h->intr_mode], intxhandler,
				IRQF_DISABLED, h->devname, h);
	if (rc) {
		dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
			h->intr[h->intr_mode], h->devname);
		return -ENODEV;
	}
	return 0;
}
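
/*
 * A note on the 32-byte command alignment enforced below: commands are
 * handed to the controller by bus address, and because each command in
 * the pool is aligned to a 32-byte boundary, the five low-order bits of
 * that address are known to be zero.  Three of them encode which of the
 * eight block-fetch sizes the command needs in performant mode (see
 * calc_bucket_map() below); the rest are used by the hardware and the
 * driver as described in hpsa.h.
 */
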
static int __devinit hpsa_init_one(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	int dac, rc;
	struct ctlr_info *h;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_init_reset_devices(pdev);
	if (rc)
		return rc;

	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
#define COMMANDLIST_ALIGNMENT 32
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->pdev = pdev;
	h->busy_initializing = 1;
	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->cmpQ);
	INIT_LIST_HEAD(&h->reqQ);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->scan_lock);
	rc = hpsa_pci_init(h);
	if (rc != 0)
		goto clean1;

	sprintf(h->devname, "hpsa%d", number_of_controllers);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc == 0) {
		dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc == 0) {
			dac = 0;
		} else {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean1;
		}
	}

	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
		goto clean2;
	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
		h->devname, pdev->device,
		h->intr[h->intr_mode], dac ? "" : " not");
	if (hpsa_allocate_cmd_pool(h))
		goto clean4;
	if (hpsa_allocate_sg_chain_blocks(h))
		goto clean4;
	init_waitqueue_head(&h->scan_wait_queue);
	h->scan_finished = 1; /* no scan currently in progress */

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;
	h->scsi_host = NULL;
	spin_lock_init(&h->devlock);

	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_put_ctlr_into_performant_mode(h);
	hpsa_hba_inquiry(h);
	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
	h->busy_initializing = 0;
	return 0; /* probe must return 0 on success, not a positive value */

clean4:
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	free_irq(h->intr[h->intr_mode], h);
clean2:
clean1:
	h->busy_initializing = 0;
	kfree(h);
	return rc;
}

static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;

	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

	c = cmd_special_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		goto out_of_memory;
	}
	fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD);
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
	if (c->err_info->CommandStatus != 0)
		dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
	cmd_special_free(h, c);
out_of_memory:
	kfree(flush_buf);
}
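
/*
 * Note on hpsa_shutdown() below: the cache flush is issued while board
 * interrupts are still enabled, since the flush command appears to
 * complete through the normal interrupt-driven path.  Only after the
 * battery-backed cache has been committed to disk are interrupts
 * masked and the irq released.
 */
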
static void hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Send the flush cache command to write all data in the
	 * battery-backed cache to disk, then turn the board
	 * interrupts off.
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	free_irq(h->intr[h->intr_mode], h);
#ifdef CONFIG_PCI_MSI
	if (h->msix_vector)
		pci_disable_msix(h->pdev);
	else if (h->msi_vector)
		pci_disable_msi(h->pdev);
#endif				/* CONFIG_PCI_MSI */
}

static void __devexit hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);
	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
	hpsa_shutdown(pdev);
	iounmap(h->vaddr);
	iounmap(h->transtable);
	iounmap(h->cfgtable);
	hpsa_free_sg_chain_blocks(h);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct CommandList),
		h->cmd_pool, h->cmd_pool_dhandle);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct ErrorInfo),
		h->errinfo_pool, h->errinfo_pool_dhandle);
	pci_free_consistent(h->pdev, h->reply_pool_size,
		h->reply_pool, h->reply_pool_dhandle);
	kfree(h->cmd_pool_bits);
	kfree(h->blockFetchTable);
	kfree(h->hba_inquiry_data);
	/*
	 * Deliberately omit pci_disable_device(): it does something nasty to
	 * Smart Array controllers that pci_enable_device does not undo
	 */
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	kfree(h);
}

static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return -ENOSYS;
}

static struct pci_driver hpsa_pci_driver = {
	.name = "hpsa",
	.probe = hpsa_init_one,
	.remove = __devexit_p(hpsa_remove_one),
	.id_table = hpsa_pci_device_id,
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};

/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers.  The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.  This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes.  The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int *bucket_map)
{
	int i, j, b, size;

	/* even a command with 0 SGs requires 4 blocks */
#define MINIMUM_TRANSFER_BLOCKS 4
#define NUM_BUCKETS 8
	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + MINIMUM_TRANSFER_BLOCKS;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}
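
/*
 * Worked example of the mapping (using the bft[] table from
 * hpsa_enter_performant_mode() below): a command with 3 SG entries
 * needs 3 + MINIMUM_TRANSFER_BLOCKS = 7 sixteen-byte blocks.  The
 * smallest bucket of at least 7 blocks is bucket[2] = 8, so
 * bucket_map[3] = 2 and the controller fetches 8 * 16 = 128 bytes
 * for such a command.
 */
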
static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
	u32 use_short_tags)
{
	int i;
	unsigned long register_value;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller to which we write to tell it 8 different
	 * sizes of commands which there may be.  It's a way of
	 * reducing the DMA done to fetch each command.  Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within.  The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16-byte blocks.
	 * The largest command possible requires MAXSGENTRIES + 4 16-byte
	 * blocks.  Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements.  bft[] contains the eight values we write to
	 * the registers.  They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
	BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
	/* 5 = 1 s/g entry or 4k
	 * 6 = 2 s/g entry or 8k
	 * 8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */

	h->reply_pool_wraparound = 1; /* spec: init to 1 */

	/* Controller spec: zero out this buffer. */
	memset(h->reply_pool, 0, h->reply_pool_size);
	h->reply_pool_head = h->reply_pool;

	bft[7] = h->max_sg_entries + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(1, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);
	writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
	writel(0, &h->transtable->RepQAddr0High32);
	writel(CFGTBL_Trans_Performant | use_short_tags,
		&(h->cfgtable->HostWrite.TransportRequest));
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_warn(&h->pdev->dev, "unable to get board into"
					" performant mode\n");
		return;
	}
	/* Change the access methods to the performant access methods */
	h->access = SA5_performant_access;
	h->transMethod = CFGTBL_Trans_Performant;
}
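
/*
 * For context: in performant mode, completions come back through the
 * reply ring configured above, one u64 entry per completed command in
 * an array of h->max_commands slots.  The ring is zeroed and
 * reply_pool_wraparound starts at 1 (per the spec) so the driver can
 * tell freshly written entries from stale ones as the ring wraps.
 */
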
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;

	if (hpsa_simple_mode)
		return;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	hpsa_get_max_perf_mode_cmds(h);
	h->max_sg_entries = 32;
	/* Performant mode ring buffer and supporting data structures */
	h->reply_pool_size = h->max_commands * sizeof(u64);
	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
		&(h->reply_pool_dhandle));

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((h->max_sg_entries + 1) *
		sizeof(u32)), GFP_KERNEL);

	if ((h->reply_pool == NULL)
	    || (h->blockFetchTable == NULL))
		goto clean_up;

	hpsa_enter_performant_mode(h,
		trans_support & CFGTBL_Trans_use_short_tags);

	return;

clean_up:
	if (h->reply_pool)
		pci_free_consistent(h->pdev, h->reply_pool_size,
			h->reply_pool, h->reply_pool_dhandle);
	kfree(h->blockFetchTable);
}

/*
 * This is it.  Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one
 * of our cards.
 */
static int __init hpsa_init(void)
{
	return pci_register_driver(&hpsa_pci_driver);
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);