/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */
#ifndef HPSA_H
#define HPSA_H

#include <scsi/scsicam.h>

#define IO_OK		0
#define IO_ERROR	1

struct ctlr_info;

struct access_method {
	void (*submit_command)(struct ctlr_info *h,
		struct CommandList *c);
	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
	unsigned long (*fifo_full)(struct ctlr_info *h);
	bool (*intr_pending)(struct ctlr_info *h);
	unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
};

struct hpsa_scsi_dev_t {
	int devtype;
	int bus, target, lun;		/* as presented to the OS */
	unsigned char scsi3addr[8];	/* as presented to the HW */
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
	unsigned char device_id[16];	/* from inquiry pg. 0x83 */
	unsigned char vendor[8];	/* bytes 8-15 of inquiry data */
	unsigned char model[16];	/* bytes 16-31 of inquiry data */
	unsigned char raid_level;	/* from inquiry page 0xC1 */
	unsigned char volume_offline;	/* discovered via TUR or VPD */
	u32 ioaccel_handle;
	int offload_config;		/* I/O accel RAID offload configured */
	int offload_enabled;		/* I/O accel RAID offload enabled */
	int offload_to_mirror;		/* Send next I/O accelerator RAID
					 * offload request to mirror drive
					 */
	struct raid_map_data raid_map;	/* I/O accelerator RAID map */

};

struct reply_queue_buffer {
	u64 *head;
	size_t size;
	u8 wraparound;
	u32 current_entry;
	dma_addr_t busaddr;
};

#pragma pack(1)
struct bmic_controller_parameters {
	u8 led_flags;
	u8 enable_command_list_verification;
	u8 backed_out_write_drives;
	u16 stripes_for_parity;
	u8 parity_distribution_mode_flags;
	u16 max_driver_requests;
	u16 elevator_trend_count;
	u8 disable_elevator;
	u8 force_scan_complete;
	u8 scsi_transfer_mode;
	u8 force_narrow;
	u8 rebuild_priority;
	u8 expand_priority;
	u8 host_sdb_asic_fix;
	u8 pdpi_burst_from_host_disabled;
	char software_name[64];
	char hardware_name[32];
	u8 bridge_revision;
	u8 snapshot_priority;
	u32 os_specific;
	u8 post_prompt_timeout;
	u8 automatic_drive_slamming;
	u8 reserved1;
	u8 nvram_flags;
#define HBA_MODE_ENABLED_FLAG (1 << 3)
	u8 cache_nvram_flags;
	u8 drive_config_flags;
	u16 reserved2;
	u8 temp_warning_level;
	u8 temp_shutdown_level;
	u8 temp_condition_reset;
	u8 max_coalesce_commands;
	u32 max_coalesce_delay;
	u8 orca_password[4];
	u8 access_id[16];
	u8 reserved[356];
};
#pragma pack()
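
/*
 * Because of the #pragma pack(1) above, struct bmic_controller_parameters
 * must match the firmware-defined layout byte for byte; as declared, the
 * fields pack to 512 bytes.  A compile-time check along these lines (an
 * illustrative sketch, not part of this header) would catch an accidental
 * size change:
 *
 *	BUILD_BUG_ON(sizeof(struct bmic_controller_parameters) != 512);
 */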

struct ctlr_info {
	int	ctlr;
	char	devname[8];
	char	*product_name;
	struct pci_dev *pdev;
	u32	board_id;
	void __iomem *vaddr;
	unsigned long paddr;
	int	nr_cmds; /* Number of commands allowed on this controller */
	struct CfgTable __iomem *cfgtable;
	int	interrupts_enabled;
	int	max_commands;
	atomic_t commands_outstanding;
#	define PERF_MODE_INT	0
#	define DOORBELL_INT	1
#	define SIMPLE_MODE_INT	2
#	define MEMQ_MODE_INT	3
	unsigned int intr[MAX_REPLY_QUEUES];
	unsigned int msix_vector;
	unsigned int msi_vector;
	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
	struct access_method access;
	char hba_mode_enabled;

	/* queue and queue Info */
	struct list_head reqQ;
	struct list_head cmpQ;
	unsigned int Qdepth;
	unsigned int maxSG;
	spinlock_t lock;
	int maxsgentries;
	u8 max_cmd_sg_entries;
	int chainsize;
	struct SGDescriptor **cmd_sg_list;

	/* pointers to command and error info pool */
	struct CommandList	*cmd_pool;
	dma_addr_t		cmd_pool_dhandle;
	struct io_accel1_cmd	*ioaccel_cmd_pool;
	dma_addr_t		ioaccel_cmd_pool_dhandle;
	struct io_accel2_cmd	*ioaccel2_cmd_pool;
	dma_addr_t		ioaccel2_cmd_pool_dhandle;
	struct ErrorInfo	*errinfo_pool;
	dma_addr_t		errinfo_pool_dhandle;
	unsigned long		*cmd_pool_bits;
	int			scan_finished;
	spinlock_t		scan_lock;
	wait_queue_head_t	scan_wait_queue;

	struct Scsi_Host *scsi_host;
	spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
	int ndevices; /* number of used elements in .dev[] array. */
	struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
	/*
	 * Performant mode tables.
	 */
	u32 trans_support;
	u32 trans_offset;
	struct TransTable_struct __iomem *transtable;
	unsigned long transMethod;

	/* cap concurrent passthrus at some reasonable maximum */
#define HPSA_MAX_CONCURRENT_PASSTHRUS (20)
	spinlock_t passthru_count_lock; /* protects passthru_count */
	int passthru_count;

	/*
	 * Performant mode completion buffers
	 */
	size_t reply_queue_size;
	struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
	u8 nreply_queues;
	u32 *blockFetchTable;
	u32 *ioaccel1_blockFetchTable;
	u32 *ioaccel2_blockFetchTable;
	u32 __iomem *ioaccel2_bft2_regs;
	unsigned char *hba_inquiry_data;
	u32 driver_support;
	u32 fw_support;
	int ioaccel_support;
	int ioaccel_maxsg;
	u64 last_intr_timestamp;
	u32 last_heartbeat;
	u64 last_heartbeat_timestamp;
	u32 heartbeat_sample_interval;
	atomic_t firmware_flash_in_progress;
	u32 __percpu *lockup_detected;
	struct delayed_work monitor_ctlr_work;
	int remove_in_progress;
	u32 fifo_recently_full;
	/* Address of h->q[x] is passed to intr handler to know which queue */
	u8 q[MAX_REPLY_QUEUES];
	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
#define HPSATMF_BITS_SUPPORTED	(1 << 0)
#define HPSATMF_PHYS_LUN_RESET	(1 << 1)
#define HPSATMF_PHYS_NEX_RESET	(1 << 2)
#define HPSATMF_PHYS_TASK_ABORT	(1 << 3)
#define HPSATMF_PHYS_TSET_ABORT	(1 << 4)
#define HPSATMF_PHYS_CLEAR_ACA	(1 << 5)
#define HPSATMF_PHYS_CLEAR_TSET	(1 << 6)
#define HPSATMF_PHYS_QRY_TASK	(1 << 7)
#define HPSATMF_PHYS_QRY_TSET	(1 << 8)
#define HPSATMF_PHYS_QRY_ASYNC	(1 << 9)
#define HPSATMF_MASK_SUPPORTED	(1 << 16)
#define HPSATMF_LOG_LUN_RESET	(1 << 17)
#define HPSATMF_LOG_NEX_RESET	(1 << 18)
#define HPSATMF_LOG_TASK_ABORT	(1 << 19)
#define HPSATMF_LOG_TSET_ABORT	(1 << 20)
#define HPSATMF_LOG_CLEAR_ACA	(1 << 21)
#define HPSATMF_LOG_CLEAR_TSET	(1 << 22)
#define HPSATMF_LOG_QRY_TASK	(1 << 23)
#define HPSATMF_LOG_QRY_TSET	(1 << 24)
#define HPSATMF_LOG_QRY_ASYNC	(1 << 25)
	u32 events;
#define CTLR_STATE_CHANGE_EVENT				(1 << 0)
#define CTLR_ENCLOSURE_HOT_PLUG_EVENT			(1 << 1)
#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV		(1 << 4)
#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV		(1 << 5)
#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL		(1 << 6)
#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED	(1 << 30)
#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE	(1 << 31)

#define RESCAN_REQUIRED_EVENT_BITS \
		(CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
		CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
		CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
	spinlock_t offline_device_lock;
	struct list_head offline_device_list;
	int	acciopath_status;
	int	drv_req_rescan;	/* flag for driver to request rescan event */
	int	raid_offload_debug;
};
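
/*
 * An illustrative sketch (hypothetical helper, not part of the driver) of
 * how the per-board access_method table in h->access keeps the core I/O
 * path generation-agnostic: the same call sites drive the old FIFO boards,
 * performant-mode boards, and the ioaccel paths.
 *
 *	static void example_start_io(struct ctlr_info *h, struct CommandList *c)
 *	{
 *		if (h->access.fifo_full(h))
 *			return;		(a real caller would requeue on reqQ)
 *		atomic_inc(&h->commands_outstanding);
 *		h->access.submit_command(h, c);
 *	}
 */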

struct offline_device_entry {
	unsigned char scsi3addr[8];
	struct list_head offline_list;
};

#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
#define HPSA_RESET_TYPE_CONTROLLER 0x00
#define HPSA_RESET_TYPE_BUS 0x01
#define HPSA_RESET_TYPE_TARGET 0x03
#define HPSA_RESET_TYPE_LUN 0x04
#define HPSA_MSG_SEND_RETRY_LIMIT 10
#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)

/* Maximum time in seconds driver will wait for command completions
 * when polling before giving up.
 */
#define HPSA_MAX_POLL_TIME_SECS (20)

/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
 * how many times to retry TEST UNIT READY on a device
 * while waiting for it to become ready before giving up.
 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
 * between sending TURs while waiting for a device
 * to become ready.
 */
#define HPSA_TUR_RETRY_LIMIT (20)
#define HPSA_MAX_WAIT_INTERVAL_SECS (30)
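
/*
 * How the two TUR limits combine, as an illustrative sketch (the real
 * retry loop lives in hpsa.c, and example_send_tur() is a hypothetical
 * stand-in): retry up to HPSA_TUR_RETRY_LIMIT times, doubling the wait
 * between attempts but capping it at HPSA_MAX_WAIT_INTERVAL_SECS.
 *
 *	waittime = 1 * HZ;
 *	for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
 *		if (example_send_tur(h, scsi3addr) == IO_OK)
 *			break;
 *		msleep(jiffies_to_msecs(waittime));
 *		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS * HZ)
 *			waittime *= 2;
 *	}
 */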

/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
 * to become ready, in seconds, before giving up on it.
 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
 * between polling the board to see if it is ready, in
 * milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and
 * HPSA_BOARD_READY_ITERATIONS are derived from those.
 */
#define HPSA_BOARD_READY_WAIT_SECS (120)
#define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_BOARD_NOT_READY_ITERATIONS \
	((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)

/* Defining the different access methods */
/*
 * Memory mapped FIFO interface (SMART 53xx cards)
 */
#define SA5_DOORBELL	0x20
#define SA5_REQUEST_PORT_OFFSET	0x40
#define SA5_REPLY_INTR_MASK_OFFSET	0x34
#define SA5_REPLY_PORT_OFFSET		0x44
#define SA5_INTR_STATUS		0x30
#define SA5_SCRATCHPAD_OFFSET	0xB0

#define SA5_CTCFG_OFFSET	0xB4
#define SA5_CTMEM_OFFSET	0xB8

#define SA5_INTR_OFF		0x08
#define SA5B_INTR_OFF		0x04
#define SA5_INTR_PENDING	0x08
#define SA5B_INTR_PENDING	0x04
#define FIFO_EMPTY		0xffffffff
#define HPSA_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */

#define HPSA_ERROR_BIT		0x02

/* Performant mode flags */
#define SA5_PERF_INTR_PENDING	0x04
#define SA5_PERF_INTR_OFF	0x05
#define SA5_OUTDB_STATUS_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR		0xA0
#define SA5_OUTDB_STATUS	0x9C


#define HPSA_INTR_ON	1
#define HPSA_INTR_OFF	0

/*
 * Inbound Post Queue offsets for IO Accelerator Mode 2
 */
#define IOACCEL2_INBOUND_POSTQ_32	0x48
#define IOACCEL2_INBOUND_POSTQ_64_LOW	0xd0
#define IOACCEL2_INBOUND_POSTQ_64_HI	0xd4

/*
 * Send the command to the hardware
 */
static void SA5_submit_command(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}

static void SA5_submit_command_no_read(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}

static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->cmd_type == CMD_IOACCEL2)
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
	else
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}
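
/*
 * Why the (void) readl() in SA5_submit_command(): MMIO writes may be
 * posted, so a read from the same device is used to force the write out
 * to the controller before the submit path returns.  The pattern in the
 * abstract (illustrative only):
 *
 *	writel(val, addr);		the write may sit in a buffer...
 *	(void) readl(addr);		...until a read forces it through
 *
 * SA5_submit_command_no_read() is for controllers where this flush is
 * not needed, saving one MMIO read per command, and the ioaccel2 variant
 * picks the inbound post queue based on c->cmd_type.
 */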

/*
 *  This card is the opposite of the other cards.
 *   0 turns interrupts on...
 *   0x08 turns them off...
 */
static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* turn on interrupts */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else {
		h->interrupts_enabled = 0;
		writel(SA5_PERF_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
{
	struct reply_queue_buffer *rq = &h->reply_queue[q];
	unsigned long register_value = FIFO_EMPTY;

	/* msi auto clears the interrupt pending bit. */
	if (!(h->msi_vector || h->msix_vector)) {
		/* flush the controller write of the reply queue by reading
		 * outbound doorbell status register.
		 */
		register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
		/* Do a read in order to flush the write to the controller
		 * (as per spec.)
		 */
		register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
	}

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		register_value = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		register_value = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return register_value;
}

/*
 * Returns true if fifo is full.
 */
static unsigned long SA5_fifo_full(struct ctlr_info *h)
{
	return atomic_read(&h->commands_outstanding) >= h->max_commands;
}

/*
 * returns value read from hardware.
 *     returns FIFO_EMPTY if there is nothing to read
 */
static unsigned long SA5_completed(struct ctlr_info *h,
	__attribute__((unused)) u8 q)
{
	unsigned long register_value
		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);

	if (register_value != FIFO_EMPTY)
		atomic_dec(&h->commands_outstanding);

#ifdef HPSA_DEBUG
	if (register_value != FIFO_EMPTY)
		dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
			register_value);
	else
		dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
#endif

	return register_value;
}
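
/*
 * A note on the reply ring in SA5_performant_completed() above: the low
 * bit of each 64-bit reply entry is a phase (toggle) bit.  The controller
 * flips the bit it writes on every pass through the ring, and the driver
 * mirrors that in rq->wraparound, so an entry is valid only while the two
 * agree.  An illustrative trace:
 *
 *	pass 0: controller writes entries with bit 0 == 0, driver expects 0
 *	pass 1: controller writes entries with bit 0 == 1, driver expects 1
 *
 * An entry still carrying the previous pass's bit means "nothing new".
 */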

/*
 * Returns true if an interrupt is pending.
 */
static bool SA5_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value =
		readl(h->vaddr + SA5_INTR_STATUS);
	return register_value & SA5_INTR_PENDING;
}

static bool SA5_performant_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	if (!register_value)
		return false;

	if (h->msi_vector || h->msix_vector)
		return true;

	/* Read outbound doorbell to flush */
	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
}

#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT	0x100

static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
		true : false;
}

#define IOACCEL_MODE1_REPLY_QUEUE_INDEX	0x1A0
#define IOACCEL_MODE1_PRODUCER_INDEX	0x1B8
#define IOACCEL_MODE1_CONSUMER_INDEX	0x1BC
#define IOACCEL_MODE1_REPLY_UNUSED	0xFFFFFFFFFFFFFFFFULL

static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
{
	u64 register_value;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	BUG_ON(q >= h->nreply_queues);

	register_value = rq->head[rq->current_entry];
	if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
		rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
		if (++rq->current_entry == rq->size)
			rq->current_entry = 0;
		/*
		 * @todo
		 *
		 * Don't really need to write the new index after each command,
		 * but with current driver design this is easiest.
		 */
		wmb();
		writel((q << 24) | rq->current_entry, h->vaddr +
				IOACCEL_MODE1_CONSUMER_INDEX);
		atomic_dec(&h->commands_outstanding);
	}
	return (unsigned long) register_value;
}

static struct access_method SA5_access = {
	SA5_submit_command,
	SA5_intr_mask,
	SA5_fifo_full,
	SA5_intr_pending,
	SA5_completed,
};

static struct access_method SA5_ioaccel_mode1_access = {
	SA5_submit_command,
	SA5_performant_intr_mask,
	SA5_fifo_full,
	SA5_ioaccel_mode1_intr_pending,
	SA5_ioaccel_mode1_completed,
};

static struct access_method SA5_ioaccel_mode2_access = {
	SA5_submit_command_ioaccel2,
	SA5_performant_intr_mask,
	SA5_fifo_full,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};

static struct access_method SA5_performant_access = {
	SA5_submit_command,
	SA5_performant_intr_mask,
	SA5_fifo_full,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};

static struct access_method SA5_performant_access_no_read = {
	SA5_submit_command_no_read,
	SA5_performant_intr_mask,
	SA5_fifo_full,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};

struct board_type {
	u32	board_id;
	char	*product_name;
	struct access_method *access;
};

#endif /* HPSA_H */