/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */
#ifndef HPSA_H
#define HPSA_H

#include <scsi/scsicam.h>

/* Generic command completion status used within the driver */
#define IO_OK		0
#define IO_ERROR	1

struct ctlr_info;

/*
 * Per-board-family accessor vtable: how commands are submitted to the
 * hardware, how interrupts are masked/checked, and how completions are
 * fetched.  One instance per supported transport mode (see the
 * SA5_*_access tables later in this file).
 */
struct access_method {
	void (*submit_command)(struct ctlr_info *h,
		struct CommandList *c);
	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
	unsigned long (*fifo_full)(struct ctlr_info *h);
	bool (*intr_pending)(struct ctlr_info *h);
	unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
};

/* One logical device exposed by the controller (disk, logical drive,
 * or the RAID controller itself).
 */
struct hpsa_scsi_dev_t {
	int devtype;
	int bus, target, lun;		/* as presented to the OS */
	unsigned char scsi3addr[8];	/* as presented to the HW */
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
	unsigned char device_id[16];	/* from inquiry pg. 0x83 */
	unsigned char vendor[8];	/* bytes 8-15 of inquiry data */
	unsigned char model[16];	/* bytes 16-31 of inquiry data */
	unsigned char raid_level;	/* from inquiry page 0xC1 */
	u32 ioaccel_handle;
	int offload_config;		/* I/O accel RAID offload configured */
	int offload_enabled;		/* I/O accel RAID offload enabled */
	int offload_to_mirror;		/* Send next I/O accelerator RAID
					 * offload request to mirror drive
					 */
	struct raid_map_data raid_map;	/* I/O accelerator RAID map */

};

/* One performant-mode reply (completion) ring. */
struct reply_pool {
	u64 *head;		/* ring buffer base */
	size_t size;		/* number of entries in the ring */
	u8 wraparound;		/* toggle bit; flipped each time the ring wraps */
	u32 current_entry;	/* index of next entry to consume */
};

/* Per-controller state. One of these exists for each board found. */
struct ctlr_info {
	int	ctlr;
	char	devname[8];
	char    *product_name;
	struct pci_dev *pdev;
	u32	board_id;
	void __iomem *vaddr;		/* mapped controller registers */
	unsigned long paddr;
	int	nr_cmds; /* Number of commands allowed on this controller */
	struct CfgTable __iomem *cfgtable;
	int	interrupts_enabled;
	int	major;
	int	max_commands;
	int	commands_outstanding;
	int	max_outstanding; /* Debug */
	int	usage_count;  /* number of opens on all minor devices */
#	define PERF_MODE_INT	0
#	define DOORBELL_INT	1
#	define SIMPLE_MODE_INT	2
#	define MEMQ_MODE_INT	3
	unsigned int intr[MAX_REPLY_QUEUES];
	unsigned int msix_vector;
	unsigned int msi_vector;
	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
	struct access_method access;

	/* queue and queue Info */
	struct list_head reqQ;
	struct list_head cmpQ;
	unsigned int Qdepth;
	unsigned int maxSG;
	spinlock_t lock;
	int maxsgentries;
	u8 max_cmd_sg_entries;
	int chainsize;
	struct SGDescriptor **cmd_sg_list;

	/* pointers to command and error info pool */
	struct CommandList	*cmd_pool;
	dma_addr_t		cmd_pool_dhandle;
	struct io_accel1_cmd	*ioaccel_cmd_pool;
	dma_addr_t		ioaccel_cmd_pool_dhandle;
	struct io_accel2_cmd	*ioaccel2_cmd_pool;
	dma_addr_t		ioaccel2_cmd_pool_dhandle;
	struct ErrorInfo	*errinfo_pool;
	dma_addr_t		errinfo_pool_dhandle;
	unsigned long		*cmd_pool_bits;
	int			scan_finished;
	spinlock_t		scan_lock;
	wait_queue_head_t	scan_wait_queue;

	struct Scsi_Host *scsi_host;
	spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
	int	ndevices; /* number of used elements in .dev[] array. */
	struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
	/*
	 * Performant mode tables.
	 */
	u32 trans_support;
	u32 trans_offset;
	struct TransTable_struct *transtable;
	unsigned long transMethod;

	/* cap concurrent passthrus at some reasonable maximum */
#define HPSA_MAX_CONCURRENT_PASSTHRUS (20)
	spinlock_t passthru_count_lock; /* protects passthru_count */
	int passthru_count;

	/*
	 * Performant mode completion buffers
	 */
	u64 *reply_pool;
	size_t reply_pool_size;
	struct reply_pool reply_queue[MAX_REPLY_QUEUES];
	u8 nreply_queues;
	dma_addr_t reply_pool_dhandle;
	u32 *blockFetchTable;
	u32 *ioaccel1_blockFetchTable;
	u32 *ioaccel2_blockFetchTable;
	u32 *ioaccel2_bft2_regs;
	unsigned char *hba_inquiry_data;
	u32 driver_support;
	u32 fw_support;
	int ioaccel_support;
	int ioaccel_maxsg;
	u64 last_intr_timestamp;
	u32 last_heartbeat;
	u64 last_heartbeat_timestamp;
	u32 heartbeat_sample_interval;
	atomic_t firmware_flash_in_progress;
	u32 lockup_detected;
	struct delayed_work monitor_ctlr_work;
	int remove_in_progress;
	u32 fifo_recently_full;
	/* Address of h->q[x] is passed to intr handler to know which queue */
	u8 q[MAX_REPLY_QUEUES];
	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
#define HPSATMF_BITS_SUPPORTED  (1 << 0)
#define HPSATMF_PHYS_LUN_RESET  (1 << 1)
#define HPSATMF_PHYS_NEX_RESET  (1 << 2)
#define HPSATMF_PHYS_TASK_ABORT (1 << 3)
#define HPSATMF_PHYS_TSET_ABORT (1 << 4)
#define HPSATMF_PHYS_CLEAR_ACA  (1 << 5)
#define HPSATMF_PHYS_CLEAR_TSET (1 << 6)
#define HPSATMF_PHYS_QRY_TASK   (1 << 7)
#define HPSATMF_PHYS_QRY_TSET   (1 << 8)
#define HPSATMF_PHYS_QRY_ASYNC  (1 << 9)
#define HPSATMF_MASK_SUPPORTED  (1 << 16)
#define HPSATMF_LOG_LUN_RESET   (1 << 17)
#define HPSATMF_LOG_NEX_RESET   (1 << 18)
#define HPSATMF_LOG_TASK_ABORT  (1 << 19)
#define HPSATMF_LOG_TSET_ABORT  (1 << 20)
#define HPSATMF_LOG_CLEAR_ACA   (1 << 21)
#define HPSATMF_LOG_CLEAR_TSET  (1 << 22)
#define HPSATMF_LOG_QRY_TASK    (1 << 23)
#define HPSATMF_LOG_QRY_TSET    (1 << 24)
#define HPSATMF_LOG_QRY_ASYNC   (1 << 25)
	u32 events;
	int	acciopath_status;
	int	drv_req_rescan;	/* flag for driver to request rescan event */
};
#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
#define HPSA_RESET_TYPE_CONTROLLER 0x00
#define HPSA_RESET_TYPE_BUS 0x01
#define HPSA_RESET_TYPE_TARGET 0x03
#define HPSA_RESET_TYPE_LUN 0x04
#define HPSA_MSG_SEND_RETRY_LIMIT 10
#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)

/* Maximum time in seconds driver will wait for command completions
 * when polling before giving up.
 */
#define HPSA_MAX_POLL_TIME_SECS (20)

/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
 * how many times to retry TEST UNIT READY on a device
 * while waiting for it to become ready before giving up.
 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
 * between sending TURs while waiting for a device
 * to become ready.
 */
#define HPSA_TUR_RETRY_LIMIT (20)
#define HPSA_MAX_WAIT_INTERVAL_SECS (30)

/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
 * to become ready, in seconds, before giving up on it.
 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
 * between polling the board to see if it is ready, in
 * milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and
 * HPSA_BOARD_READY_ITERATIONS are derived from those.
 */
#define HPSA_BOARD_READY_WAIT_SECS (120)
#define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_BOARD_NOT_READY_ITERATIONS \
	((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)

/* Define the different access methods */
/*
 * Memory mapped FIFO interface (SMART 53xx cards)
 * Register offsets below are relative to h->vaddr.
 */
#define SA5_DOORBELL	0x20
#define SA5_REQUEST_PORT_OFFSET	0x40
#define SA5_REPLY_INTR_MASK_OFFSET	0x34
#define SA5_REPLY_PORT_OFFSET		0x44
#define SA5_INTR_STATUS		0x30
#define SA5_SCRATCHPAD_OFFSET	0xB0

#define SA5_CTCFG_OFFSET	0xB4
#define SA5_CTMEM_OFFSET	0xB8

#define SA5_INTR_OFF		0x08
#define SA5B_INTR_OFF		0x04
#define SA5_INTR_PENDING	0x08
#define SA5B_INTR_PENDING	0x04
#define FIFO_EMPTY		0xffffffff
#define HPSA_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */

#define HPSA_ERROR_BIT		0x02

/* Performant mode flags */
#define SA5_PERF_INTR_PENDING	0x04
#define SA5_PERF_INTR_OFF	0x05
#define SA5_OUTDB_STATUS_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR		0xA0
261 #define SA5_OUTDB_CLEAR_PERF_BIT 0x01 262 #define SA5_OUTDB_STATUS 0x9C 263 264 265 #define HPSA_INTR_ON 1 266 #define HPSA_INTR_OFF 0 267 268 /* 269 * Inbound Post Queue offsets for IO Accelerator Mode 2 270 */ 271 #define IOACCEL2_INBOUND_POSTQ_32 0x48 272 #define IOACCEL2_INBOUND_POSTQ_64_LOW 0xd0 273 #define IOACCEL2_INBOUND_POSTQ_64_HI 0xd4 274 275 /* 276 Send the command to the hardware 277 */ 278 static void SA5_submit_command(struct ctlr_info *h, 279 struct CommandList *c) 280 { 281 dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr, 282 c->Header.Tag.lower); 283 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); 284 (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); 285 } 286 287 static void SA5_submit_command_ioaccel2(struct ctlr_info *h, 288 struct CommandList *c) 289 { 290 dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr, 291 c->Header.Tag.lower); 292 if (c->cmd_type == CMD_IOACCEL2) 293 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); 294 else 295 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); 296 (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); 297 } 298 299 /* 300 * This card is the opposite of the other cards. 301 * 0 turns interrupts on... 302 * 0x08 turns them off... 
303 */ 304 static void SA5_intr_mask(struct ctlr_info *h, unsigned long val) 305 { 306 if (val) { /* Turn interrupts on */ 307 h->interrupts_enabled = 1; 308 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 309 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 310 } else { /* Turn them off */ 311 h->interrupts_enabled = 0; 312 writel(SA5_INTR_OFF, 313 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 314 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 315 } 316 } 317 318 static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val) 319 { 320 if (val) { /* turn on interrupts */ 321 h->interrupts_enabled = 1; 322 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 323 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 324 } else { 325 h->interrupts_enabled = 0; 326 writel(SA5_PERF_INTR_OFF, 327 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 328 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 329 } 330 } 331 332 static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q) 333 { 334 struct reply_pool *rq = &h->reply_queue[q]; 335 unsigned long flags, register_value = FIFO_EMPTY; 336 337 /* msi auto clears the interrupt pending bit. */ 338 if (!(h->msi_vector || h->msix_vector)) { 339 /* flush the controller write of the reply queue by reading 340 * outbound doorbell status register. 341 */ 342 register_value = readl(h->vaddr + SA5_OUTDB_STATUS); 343 writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR); 344 /* Do a read in order to flush the write to the controller 345 * (as per spec.) 
346 */ 347 register_value = readl(h->vaddr + SA5_OUTDB_STATUS); 348 } 349 350 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) { 351 register_value = rq->head[rq->current_entry]; 352 rq->current_entry++; 353 spin_lock_irqsave(&h->lock, flags); 354 h->commands_outstanding--; 355 spin_unlock_irqrestore(&h->lock, flags); 356 } else { 357 register_value = FIFO_EMPTY; 358 } 359 /* Check for wraparound */ 360 if (rq->current_entry == h->max_commands) { 361 rq->current_entry = 0; 362 rq->wraparound ^= 1; 363 } 364 return register_value; 365 } 366 367 /* 368 * Returns true if fifo is full. 369 * 370 */ 371 static unsigned long SA5_fifo_full(struct ctlr_info *h) 372 { 373 if (h->commands_outstanding >= h->max_commands) 374 return 1; 375 else 376 return 0; 377 378 } 379 /* 380 * returns value read from hardware. 381 * returns FIFO_EMPTY if there is nothing to read 382 */ 383 static unsigned long SA5_completed(struct ctlr_info *h, 384 __attribute__((unused)) u8 q) 385 { 386 unsigned long register_value 387 = readl(h->vaddr + SA5_REPLY_PORT_OFFSET); 388 unsigned long flags; 389 390 if (register_value != FIFO_EMPTY) { 391 spin_lock_irqsave(&h->lock, flags); 392 h->commands_outstanding--; 393 spin_unlock_irqrestore(&h->lock, flags); 394 } 395 396 #ifdef HPSA_DEBUG 397 if (register_value != FIFO_EMPTY) 398 dev_dbg(&h->pdev->dev, "Read %lx back from board\n", 399 register_value); 400 else 401 dev_dbg(&h->pdev->dev, "FIFO Empty read\n"); 402 #endif 403 404 return register_value; 405 } 406 /* 407 * Returns true if an interrupt is pending.. 
408 */ 409 static bool SA5_intr_pending(struct ctlr_info *h) 410 { 411 unsigned long register_value = 412 readl(h->vaddr + SA5_INTR_STATUS); 413 dev_dbg(&h->pdev->dev, "intr_pending %lx\n", register_value); 414 return register_value & SA5_INTR_PENDING; 415 } 416 417 static bool SA5_performant_intr_pending(struct ctlr_info *h) 418 { 419 unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); 420 421 if (!register_value) 422 return false; 423 424 if (h->msi_vector || h->msix_vector) 425 return true; 426 427 /* Read outbound doorbell to flush */ 428 register_value = readl(h->vaddr + SA5_OUTDB_STATUS); 429 return register_value & SA5_OUTDB_STATUS_PERF_BIT; 430 } 431 432 #define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT 0x100 433 434 static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h) 435 { 436 unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); 437 438 return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ? 439 true : false; 440 } 441 442 #define IOACCEL_MODE1_REPLY_QUEUE_INDEX 0x1A0 443 #define IOACCEL_MODE1_PRODUCER_INDEX 0x1B8 444 #define IOACCEL_MODE1_CONSUMER_INDEX 0x1BC 445 #define IOACCEL_MODE1_REPLY_UNUSED 0xFFFFFFFFFFFFFFFFULL 446 447 static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q) 448 { 449 u64 register_value; 450 struct reply_pool *rq = &h->reply_queue[q]; 451 unsigned long flags; 452 453 BUG_ON(q >= h->nreply_queues); 454 455 register_value = rq->head[rq->current_entry]; 456 if (register_value != IOACCEL_MODE1_REPLY_UNUSED) { 457 rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED; 458 if (++rq->current_entry == rq->size) 459 rq->current_entry = 0; 460 /* 461 * @todo 462 * 463 * Don't really need to write the new index after each command, 464 * but with current driver design this is easiest. 
465 */ 466 wmb(); 467 writel((q << 24) | rq->current_entry, h->vaddr + 468 IOACCEL_MODE1_CONSUMER_INDEX); 469 spin_lock_irqsave(&h->lock, flags); 470 h->commands_outstanding--; 471 spin_unlock_irqrestore(&h->lock, flags); 472 } 473 return (unsigned long) register_value; 474 } 475 476 static struct access_method SA5_access = { 477 SA5_submit_command, 478 SA5_intr_mask, 479 SA5_fifo_full, 480 SA5_intr_pending, 481 SA5_completed, 482 }; 483 484 static struct access_method SA5_ioaccel_mode1_access = { 485 SA5_submit_command, 486 SA5_performant_intr_mask, 487 SA5_fifo_full, 488 SA5_ioaccel_mode1_intr_pending, 489 SA5_ioaccel_mode1_completed, 490 }; 491 492 static struct access_method SA5_ioaccel_mode2_access = { 493 SA5_submit_command_ioaccel2, 494 SA5_performant_intr_mask, 495 SA5_fifo_full, 496 SA5_performant_intr_pending, 497 SA5_performant_completed, 498 }; 499 500 static struct access_method SA5_performant_access = { 501 SA5_submit_command, 502 SA5_performant_intr_mask, 503 SA5_fifo_full, 504 SA5_performant_intr_pending, 505 SA5_performant_completed, 506 }; 507 508 struct board_type { 509 u32 board_id; 510 char *product_name; 511 struct access_method *access; 512 }; 513 514 #endif /* HPSA_H */ 515 516