/*
 * Disk Array driver for HP Smart Array SAS controllers
 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */
#ifndef HPSA_H
#define HPSA_H

#include <scsi/scsicam.h>

#define IO_OK           0
#define IO_ERROR        1

struct ctlr_info;

struct access_method {
        void (*submit_command)(struct ctlr_info *h, struct CommandList *c);
        void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
        unsigned long (*fifo_full)(struct ctlr_info *h);
        bool (*intr_pending)(struct ctlr_info *h);
        unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
};
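
/*
 * Note (illustrative sketch, not code from the driver itself): access_method
 * is a small per-board "vtable".  Each supported controller family is bound
 * to one of the tables defined near the end of this file (SA5_access,
 * SA5_performant_access, SA5_ioaccel_mode1_access), and callers are expected
 * to dispatch through it rather than touching registers directly, roughly:
 *
 *      h->access.submit_command(h, c);
 *      if (h->access.intr_pending(h))
 *              tag = h->access.command_completed(h, q);
 *
 * The variable names above are placeholders; the real call sites live in
 * hpsa.c.
 */
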
struct hpsa_scsi_dev_t {
        int devtype;
        int bus, target, lun;           /* as presented to the OS */
        unsigned char scsi3addr[8];     /* as presented to the HW */
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
        unsigned char device_id[16];    /* from inquiry pg. 0x83 */
        unsigned char vendor[8];        /* bytes 8-15 of inquiry data */
        unsigned char model[16];        /* bytes 16-31 of inquiry data */
        unsigned char raid_level;       /* from inquiry page 0xC1 */
        u32 ioaccel_handle;
        int offload_config;             /* I/O accel RAID offload configured */
        int offload_enabled;            /* I/O accel RAID offload enabled */
        int offload_to_mirror;          /* Send next I/O accelerator RAID
                                         * offload request to mirror drive
                                         */
        struct raid_map_data raid_map;  /* I/O accelerator RAID map */
};

struct reply_pool {
        u64 *head;
        size_t size;
        u8 wraparound;
        u32 current_entry;
};

struct ctlr_info {
        int ctlr;
        char devname[8];
        char *product_name;
        struct pci_dev *pdev;
        u32 board_id;
        void __iomem *vaddr;
        unsigned long paddr;
        int nr_cmds; /* Number of commands allowed on this controller */
        struct CfgTable __iomem *cfgtable;
        int interrupts_enabled;
        int major;
        int max_commands;
        int commands_outstanding;
        int max_outstanding; /* Debug */
        int usage_count;  /* number of opens on all minor devices */
# define PERF_MODE_INT          0
# define DOORBELL_INT           1
# define SIMPLE_MODE_INT        2
# define MEMQ_MODE_INT          3
        unsigned int intr[MAX_REPLY_QUEUES];
        unsigned int msix_vector;
        unsigned int msi_vector;
        int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
        struct access_method access;

        /* queue and queue info */
        struct list_head reqQ;
        struct list_head cmpQ;
        unsigned int Qdepth;
        unsigned int maxSG;
        spinlock_t lock;
        int maxsgentries;
        u8 max_cmd_sg_entries;
        int chainsize;
        struct SGDescriptor **cmd_sg_list;

        /* pointers to command and error info pool */
        struct CommandList *cmd_pool;
        dma_addr_t cmd_pool_dhandle;
        struct io_accel1_cmd *ioaccel_cmd_pool;
        dma_addr_t ioaccel_cmd_pool_dhandle;
        struct ErrorInfo *errinfo_pool;
        dma_addr_t errinfo_pool_dhandle;
        unsigned long *cmd_pool_bits;
        int scan_finished;
        spinlock_t scan_lock;
        wait_queue_head_t scan_wait_queue;

        struct Scsi_Host *scsi_host;
        spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
        int ndevices; /* number of used elements in .dev[] array. */
        struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
        /*
         * Performant mode tables.
         */
        u32 trans_support;
        u32 trans_offset;
        struct TransTable_struct *transtable;
        unsigned long transMethod;

        /* cap concurrent passthrus at some reasonable maximum */
#define HPSA_MAX_CONCURRENT_PASSTHRUS (20)
        spinlock_t passthru_count_lock; /* protects passthru_count */
        int passthru_count;

        /*
         * Performant mode completion buffers
         */
        u64 *reply_pool;
        size_t reply_pool_size;
        struct reply_pool reply_queue[MAX_REPLY_QUEUES];
        u8 nreply_queues;
        dma_addr_t reply_pool_dhandle;
        u32 *blockFetchTable;
        u32 *ioaccel1_blockFetchTable;
        unsigned char *hba_inquiry_data;
        u32 driver_support;
        u32 fw_support;
        int ioaccel_support;
        int ioaccel_maxsg;
        u64 last_intr_timestamp;
        u32 last_heartbeat;
        u64 last_heartbeat_timestamp;
        u32 heartbeat_sample_interval;
        atomic_t firmware_flash_in_progress;
        u32 lockup_detected;
        struct delayed_work monitor_ctlr_work;
        int remove_in_progress;
        u32 fifo_recently_full;
        /* Address of h->q[x] is passed to intr handler to know which queue */
        u8 q[MAX_REPLY_QUEUES];
        u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
#define HPSATMF_BITS_SUPPORTED  (1 << 0)
#define HPSATMF_PHYS_LUN_RESET  (1 << 1)
#define HPSATMF_PHYS_NEX_RESET  (1 << 2)
#define HPSATMF_PHYS_TASK_ABORT (1 << 3)
#define HPSATMF_PHYS_TSET_ABORT (1 << 4)
#define HPSATMF_PHYS_CLEAR_ACA  (1 << 5)
#define HPSATMF_PHYS_CLEAR_TSET (1 << 6)
#define HPSATMF_PHYS_QRY_TASK   (1 << 7)
#define HPSATMF_PHYS_QRY_TSET   (1 << 8)
#define HPSATMF_PHYS_QRY_ASYNC  (1 << 9)
#define HPSATMF_MASK_SUPPORTED  (1 << 16)
#define HPSATMF_LOG_LUN_RESET   (1 << 17)
#define HPSATMF_LOG_NEX_RESET   (1 << 18)
#define HPSATMF_LOG_TASK_ABORT  (1 << 19)
#define HPSATMF_LOG_TSET_ABORT  (1 << 20)
#define HPSATMF_LOG_CLEAR_ACA   (1 << 21)
#define HPSATMF_LOG_CLEAR_TSET  (1 << 22)
#define HPSATMF_LOG_QRY_TASK    (1 << 23)
#define HPSATMF_LOG_QRY_TSET    (1 << 24)
#define HPSATMF_LOG_QRY_ASYNC   (1 << 25)
        u32 events;
};
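
/*
 * Minimal sketch (hypothetical helper, not part of the original interface):
 * the HPSATMF_* bits above cache which task management functions the
 * firmware reports as supported, so a caller wanting to issue e.g. a
 * physical LUN reset would be expected to test the relevant bits in
 * h->TMFSupportFlags first.  The helper name and the exact combination of
 * bits checked are assumptions for illustration only.
 */
static inline bool hpsa_example_supports_phys_lun_reset(struct ctlr_info *h)
{
        /* Assumes both the "TMF bits valid" flag and the specific
         * physical LUN reset capability must be set.
         */
        return (h->TMFSupportFlags & HPSATMF_BITS_SUPPORTED) &&
               (h->TMFSupportFlags & HPSATMF_PHYS_LUN_RESET);
}
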
#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
#define HPSA_RESET_TYPE_CONTROLLER 0x00
#define HPSA_RESET_TYPE_BUS 0x01
#define HPSA_RESET_TYPE_TARGET 0x03
#define HPSA_RESET_TYPE_LUN 0x04
#define HPSA_MSG_SEND_RETRY_LIMIT 10
#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)

/* Maximum time in seconds the driver will wait for command completions
 * when polling before giving up.
 */
#define HPSA_MAX_POLL_TIME_SECS (20)

/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
 * how many times to retry TEST UNIT READY on a device
 * while waiting for it to become ready before giving up.
 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
 * between sending TURs while waiting for a device
 * to become ready.
 */
#define HPSA_TUR_RETRY_LIMIT (20)
#define HPSA_MAX_WAIT_INTERVAL_SECS (30)

/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
 * to become ready, in seconds, before giving up on it.
 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
 * between polls of the board to see if it is ready, in
 * milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and
 * HPSA_BOARD_READY_ITERATIONS are derived from those.
 */
#define HPSA_BOARD_READY_WAIT_SECS (120)
#define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
        ((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
        ((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
                HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_BOARD_NOT_READY_ITERATIONS \
        ((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
                HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)
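
/*
 * Worked example of the board-ready macros above, assuming the default
 * values given: with a 100 ms poll interval,
 * HPSA_BOARD_READY_ITERATIONS     = (120 * 1000) / 100 = 1200 polls,
 * HPSA_BOARD_NOT_READY_ITERATIONS = (100 * 1000) / 100 = 1000 polls,
 * and HPSA_BOARD_READY_POLL_INTERVAL is the same 100 ms expressed in
 * jiffies, i.e. (100 * HZ) / 1000 = HZ / 10.
 */
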
/* Defining the different access methods */
/*
 * Memory mapped FIFO interface (SMART 53xx cards)
 */
#define SA5_DOORBELL                    0x20
#define SA5_REQUEST_PORT_OFFSET         0x40
#define SA5_REPLY_INTR_MASK_OFFSET      0x34
#define SA5_REPLY_PORT_OFFSET           0x44
#define SA5_INTR_STATUS                 0x30
#define SA5_SCRATCHPAD_OFFSET           0xB0

#define SA5_CTCFG_OFFSET                0xB4
#define SA5_CTMEM_OFFSET                0xB8

#define SA5_INTR_OFF                    0x08
#define SA5B_INTR_OFF                   0x04
#define SA5_INTR_PENDING                0x08
#define SA5B_INTR_PENDING               0x04
#define FIFO_EMPTY                      0xffffffff
#define HPSA_FIRMWARE_READY             0xffff0000 /* value in scratchpad register */

#define HPSA_ERROR_BIT                  0x02

/* Performant mode flags */
#define SA5_PERF_INTR_PENDING           0x04
#define SA5_PERF_INTR_OFF               0x05
#define SA5_OUTDB_STATUS_PERF_BIT       0x01
#define SA5_OUTDB_CLEAR_PERF_BIT        0x01
#define SA5_OUTDB_CLEAR                 0xA0
#define SA5_OUTDB_STATUS                0x9C

#define HPSA_INTR_ON    1
#define HPSA_INTR_OFF   0

/*
 * Send the command to the hardware.
 */
static void SA5_submit_command(struct ctlr_info *h,
        struct CommandList *c)
{
        dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
                c->Header.Tag.lower);
        writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
        (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}

/*
 * This card is the opposite of the other cards:
 * 0 turns interrupts on...
 * 0x08 turns them off...
 */
static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
{
        if (val) { /* Turn interrupts on */
                h->interrupts_enabled = 1;
                writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
                (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
        } else { /* Turn them off */
                h->interrupts_enabled = 0;
                writel(SA5_INTR_OFF,
                        h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
                (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
        }
}

static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
{
        if (val) { /* turn on interrupts */
                h->interrupts_enabled = 1;
                writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
                (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
        } else {
                h->interrupts_enabled = 0;
                writel(SA5_PERF_INTR_OFF,
                        h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
                (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
        }
}

static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
{
        struct reply_pool *rq = &h->reply_queue[q];
        unsigned long flags, register_value = FIFO_EMPTY;

        /* msi auto clears the interrupt pending bit. */
        if (!(h->msi_vector || h->msix_vector)) {
                /* flush the controller write of the reply queue by reading
                 * outbound doorbell status register.
                 */
                register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
                writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
                /* Do a read in order to flush the write to the controller
                 * (as per spec.)
                 */
                register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
        }

        if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
                register_value = rq->head[rq->current_entry];
                rq->current_entry++;
                spin_lock_irqsave(&h->lock, flags);
                h->commands_outstanding--;
                spin_unlock_irqrestore(&h->lock, flags);
        } else {
                register_value = FIFO_EMPTY;
        }
        /* Check for wraparound */
        if (rq->current_entry == h->max_commands) {
                rq->current_entry = 0;
                rq->wraparound ^= 1;
        }
        return register_value;
}

/*
 * Returns 1 if the fifo is full, 0 otherwise.
 */
static unsigned long SA5_fifo_full(struct ctlr_info *h)
{
        if (h->commands_outstanding >= h->max_commands)
                return 1;
        else
                return 0;
}

/*
 * Returns the value read from hardware,
 * or FIFO_EMPTY if there is nothing to read.
 */
static unsigned long SA5_completed(struct ctlr_info *h,
        __attribute__((unused)) u8 q)
{
        unsigned long register_value
                = readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
        unsigned long flags;

        if (register_value != FIFO_EMPTY) {
                spin_lock_irqsave(&h->lock, flags);
                h->commands_outstanding--;
                spin_unlock_irqrestore(&h->lock, flags);
        }

#ifdef HPSA_DEBUG
        if (register_value != FIFO_EMPTY)
                dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
                        register_value);
        else
                dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
#endif

        return register_value;
}
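
/*
 * Minimal sketch (hypothetical helper, not part of the original interface):
 * an interrupt handler is expected to drain a reply queue by calling the
 * board's command_completed() method until it returns FIFO_EMPTY, handing
 * each completed tag back to the command completion code.  The helper name
 * and the finish_cmd callback below are placeholders; the real loop lives
 * in hpsa.c.
 */
static inline void hpsa_example_drain_reply_queue(struct ctlr_info *h, u8 q,
        void (*finish_cmd)(struct ctlr_info *h, u32 raw_tag))
{
        unsigned long raw_tag;

        while ((raw_tag = h->access.command_completed(h, q)) != FIFO_EMPTY)
                finish_cmd(h, (u32) raw_tag);
}
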
/*
 * Returns true if an interrupt is pending.
 */
static bool SA5_intr_pending(struct ctlr_info *h)
{
        unsigned long register_value =
                readl(h->vaddr + SA5_INTR_STATUS);
        dev_dbg(&h->pdev->dev, "intr_pending %lx\n", register_value);
        return register_value & SA5_INTR_PENDING;
}

static bool SA5_performant_intr_pending(struct ctlr_info *h)
{
        unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

        if (!register_value)
                return false;

        if (h->msi_vector || h->msix_vector)
                return true;

        /* Read outbound doorbell to flush */
        register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
        return register_value & SA5_OUTDB_STATUS_PERF_BIT;
}

#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT 0x100

static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
{
        unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

        return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
                true : false;
}

#define IOACCEL_MODE1_REPLY_QUEUE_INDEX 0x1A0
#define IOACCEL_MODE1_PRODUCER_INDEX    0x1B8
#define IOACCEL_MODE1_CONSUMER_INDEX    0x1BC
#define IOACCEL_MODE1_REPLY_UNUSED      0xFFFFFFFFFFFFFFFFULL

static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
{
        u64 register_value;
        struct reply_pool *rq = &h->reply_queue[q];
        unsigned long flags;

        BUG_ON(q >= h->nreply_queues);

        register_value = rq->head[rq->current_entry];
        if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
                rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
                if (++rq->current_entry == rq->size)
                        rq->current_entry = 0;
                /*
                 * @todo
                 *
                 * We don't really need to write the new index after each
                 * command, but with the current driver design this is
                 * easiest.
                 */
                wmb();
                writel((q << 24) | rq->current_entry, h->vaddr +
                                IOACCEL_MODE1_CONSUMER_INDEX);
                spin_lock_irqsave(&h->lock, flags);
                h->commands_outstanding--;
                spin_unlock_irqrestore(&h->lock, flags);
        }
        return (unsigned long) register_value;
}

static struct access_method SA5_access = {
        SA5_submit_command,
        SA5_intr_mask,
        SA5_fifo_full,
        SA5_intr_pending,
        SA5_completed,
};

static struct access_method SA5_ioaccel_mode1_access = {
        SA5_submit_command,
        SA5_performant_intr_mask,
        SA5_fifo_full,
        SA5_ioaccel_mode1_intr_pending,
        SA5_ioaccel_mode1_completed,
};

static struct access_method SA5_performant_access = {
        SA5_submit_command,
        SA5_performant_intr_mask,
        SA5_fifo_full,
        SA5_performant_intr_pending,
        SA5_performant_completed,
};

struct board_type {
        u32 board_id;
        char *product_name;
        struct access_method *access;
};

#endif /* HPSA_H */
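
/*
 * Note (illustrative only): struct board_type ties a controller's PCI
 * board_id to a product name and to one of the access_method tables above;
 * hpsa.c presumably keeps a table of supported boards along these lines,
 * which is how SA5_access, SA5_performant_access and
 * SA5_ioaccel_mode1_access end up being selected per controller:
 *
 *      static struct board_type products[] = {
 *              { 0x0000, "Example Smart Array", &SA5_access },
 *              ...
 *      };
 *
 * The board ID, name and table name shown here are placeholders, not an
 * authoritative list.
 */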