/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */
#ifndef HPSA_H
#define HPSA_H

#include <scsi/scsicam.h>

#define IO_OK		0
#define IO_ERROR	1

struct ctlr_info;

struct access_method {
	void (*submit_command)(struct ctlr_info *h,
		struct CommandList *c);
	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
	unsigned long (*fifo_full)(struct ctlr_info *h);
	bool (*intr_pending)(struct ctlr_info *h);
	unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
};

struct hpsa_scsi_dev_t {
	int devtype;
	int bus, target, lun;		/* as presented to the OS */
	unsigned char scsi3addr[8];	/* as presented to the HW */
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
	unsigned char device_id[16];	/* from inquiry pg. 0x83 */
	unsigned char vendor[8];	/* bytes 8-15 of inquiry data */
	unsigned char model[16];	/* bytes 16-31 of inquiry data */
	unsigned char raid_level;	/* from inquiry page 0xC1 */
	unsigned char volume_offline;	/* discovered via TUR or VPD */
	u32 ioaccel_handle;
	int offload_config;		/* I/O accel RAID offload configured */
	int offload_enabled;		/* I/O accel RAID offload enabled */
	int offload_to_mirror;		/* Send next I/O accelerator RAID
					 * offload request to mirror drive
					 */
	struct raid_map_data raid_map;	/* I/O accelerator RAID map */
};

struct reply_pool {
	u64 *head;
	size_t size;
	u8 wraparound;
	u32 current_entry;
};

#pragma pack(1)
struct bmic_controller_parameters {
	u8   led_flags;
	u8   enable_command_list_verification;
	u8   backed_out_write_drives;
	u16  stripes_for_parity;
	u8   parity_distribution_mode_flags;
	u16  max_driver_requests;
	u16  elevator_trend_count;
	u8   disable_elevator;
	u8   force_scan_complete;
	u8   scsi_transfer_mode;
	u8   force_narrow;
	u8   rebuild_priority;
	u8   expand_priority;
	u8   host_sdb_asic_fix;
	u8   pdpi_burst_from_host_disabled;
	char software_name[64];
	char hardware_name[32];
	u8   bridge_revision;
	u8   snapshot_priority;
	u32  os_specific;
	u8   post_prompt_timeout;
	u8   automatic_drive_slamming;
	u8   reserved1;
	u8   nvram_flags;
	u8   cache_nvram_flags;
	u8   drive_config_flags;
	u16  reserved2;
	u8   temp_warning_level;
	u8   temp_shutdown_level;
	u8   temp_condition_reset;
	u8   max_coalesce_commands;
	u32  max_coalesce_delay;
	u8   orca_password[4];
	u8   access_id[16];
	u8   reserved[356];
};
#pragma pack()
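
/*
 * Illustrative sketch, not part of the original header: the packed
 * layout above sums to 512 bytes (the field sizes add up to that; the
 * figure is derived from the declaration, not from a published firmware
 * spec).  A compile-time assertion such as the one below, using a
 * hypothetical typedef name, would catch accidental padding or
 * field-size drift.
 */
typedef char hpsa_bmic_params_size_check[
	(sizeof(struct bmic_controller_parameters) == 512) ? 1 : -1];
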
struct ctlr_info {
	int	ctlr;
	char	devname[8];
	char	*product_name;
	struct pci_dev *pdev;
	u32	board_id;
	void __iomem *vaddr;
	unsigned long paddr;
	int	nr_cmds; /* Number of commands allowed on this controller */
	struct CfgTable __iomem *cfgtable;
	int	interrupts_enabled;
	int	major;
	int	max_commands;
	int	commands_outstanding;
	int	max_outstanding; /* Debug */
	int	usage_count;  /* number of opens on all minor devices */
#	define PERF_MODE_INT	0
#	define DOORBELL_INT	1
#	define SIMPLE_MODE_INT	2
#	define MEMQ_MODE_INT	3
	unsigned int intr[MAX_REPLY_QUEUES];
	unsigned int msix_vector;
	unsigned int msi_vector;
	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
	struct access_method access;
	char hba_mode_enabled;

	/* queue and queue info */
	struct list_head reqQ;
	struct list_head cmpQ;
	unsigned int Qdepth;
	unsigned int maxSG;
	spinlock_t lock;
	int maxsgentries;
	u8 max_cmd_sg_entries;
	int chainsize;
	struct SGDescriptor **cmd_sg_list;

	/* pointers to command and error info pool */
	struct CommandList	*cmd_pool;
	dma_addr_t		cmd_pool_dhandle;
	struct io_accel1_cmd	*ioaccel_cmd_pool;
	dma_addr_t		ioaccel_cmd_pool_dhandle;
	struct io_accel2_cmd	*ioaccel2_cmd_pool;
	dma_addr_t		ioaccel2_cmd_pool_dhandle;
	struct ErrorInfo	*errinfo_pool;
	dma_addr_t		errinfo_pool_dhandle;
	unsigned long		*cmd_pool_bits;
	int			scan_finished;
	spinlock_t		scan_lock;
	wait_queue_head_t	scan_wait_queue;

	struct Scsi_Host *scsi_host;
	spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
	int ndevices; /* number of used elements in .dev[] array. */
	struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
	/*
	 * Performant mode tables.
	 */
	u32 trans_support;
	u32 trans_offset;
	struct TransTable_struct *transtable;
	unsigned long transMethod;

	/* cap concurrent passthrus at some reasonable maximum */
#define HPSA_MAX_CONCURRENT_PASSTHRUS (20)
	spinlock_t passthru_count_lock; /* protects passthru_count */
	int passthru_count;

	/*
	 * Performant mode completion buffers
	 */
	u64 *reply_pool;
	size_t reply_pool_size;
	struct reply_pool reply_queue[MAX_REPLY_QUEUES];
	u8 nreply_queues;
	dma_addr_t reply_pool_dhandle;
	u32 *blockFetchTable;
	u32 *ioaccel1_blockFetchTable;
	u32 *ioaccel2_blockFetchTable;
	u32 *ioaccel2_bft2_regs;
	unsigned char *hba_inquiry_data;
	u32 driver_support;
	u32 fw_support;
	int ioaccel_support;
	int ioaccel_maxsg;
	u64 last_intr_timestamp;
	u32 last_heartbeat;
	u64 last_heartbeat_timestamp;
	u32 heartbeat_sample_interval;
	atomic_t firmware_flash_in_progress;
	u32 lockup_detected;
	struct delayed_work monitor_ctlr_work;
	int remove_in_progress;
	u32 fifo_recently_full;
	/* Address of h->q[x] is passed to intr handler to know which queue */
	u8 q[MAX_REPLY_QUEUES];
	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
#define HPSATMF_BITS_SUPPORTED	(1 << 0)
#define HPSATMF_PHYS_LUN_RESET	(1 << 1)
#define HPSATMF_PHYS_NEX_RESET	(1 << 2)
#define HPSATMF_PHYS_TASK_ABORT	(1 << 3)
#define HPSATMF_PHYS_TSET_ABORT	(1 << 4)
#define HPSATMF_PHYS_CLEAR_ACA	(1 << 5)
#define HPSATMF_PHYS_CLEAR_TSET	(1 << 6)
#define HPSATMF_PHYS_QRY_TASK	(1 << 7)
#define HPSATMF_PHYS_QRY_TSET	(1 << 8)
#define HPSATMF_PHYS_QRY_ASYNC	(1 << 9)
#define HPSATMF_MASK_SUPPORTED	(1 << 16)
#define HPSATMF_LOG_LUN_RESET	(1 << 17)
#define HPSATMF_LOG_NEX_RESET	(1 << 18)
#define HPSATMF_LOG_TASK_ABORT	(1 << 19)
#define HPSATMF_LOG_TSET_ABORT	(1 << 20)
#define HPSATMF_LOG_CLEAR_ACA	(1 << 21)
#define HPSATMF_LOG_CLEAR_TSET	(1 << 22)
#define HPSATMF_LOG_QRY_TASK	(1 << 23)
#define HPSATMF_LOG_QRY_TSET	(1 << 24)
#define HPSATMF_LOG_QRY_ASYNC	(1 << 25)
	u32 events;
#define CTLR_STATE_CHANGE_EVENT				(1 << 0)
#define CTLR_ENCLOSURE_HOT_PLUG_EVENT			(1 << 1)
#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV		(1 << 4)
#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV		(1 << 5)
#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL		(1 << 6)
#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED	(1 << 30)
#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE	(1 << 31)

#define RESCAN_REQUIRED_EVENT_BITS \
		(CTLR_STATE_CHANGE_EVENT | \
		CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
		CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL | \
		CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
		CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
	spinlock_t offline_device_lock;
	struct list_head offline_device_list;
	int	acciopath_status;
	int	drv_req_rescan;	/* flag for driver to request a rescan event */
	int	raid_offload_debug;
};
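
/*
 * Illustrative sketch, not in the original header: consumers of the
 * cached TMFSupportFlags would test individual capability bits before
 * issuing a task management function.  The helper name below is
 * hypothetical.
 */
static inline int hpsa_tmf_supports_lun_reset(struct ctlr_info *h)
{
	return (h->TMFSupportFlags &
		(HPSATMF_PHYS_LUN_RESET | HPSATMF_LOG_LUN_RESET)) != 0;
}
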
struct offline_device_entry {
	unsigned char scsi3addr[8];
	struct list_head offline_list;
};

#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
#define HPSA_RESET_TYPE_CONTROLLER 0x00
#define HPSA_RESET_TYPE_BUS 0x01
#define HPSA_RESET_TYPE_TARGET 0x03
#define HPSA_RESET_TYPE_LUN 0x04
#define HPSA_MSG_SEND_RETRY_LIMIT 10
#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)

/* Maximum time in seconds driver will wait for command completions
 * when polling before giving up.
 */
#define HPSA_MAX_POLL_TIME_SECS (20)

/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
 * how many times to retry TEST UNIT READY on a device
 * while waiting for it to become ready before giving up.
 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
 * between sending TURs while waiting for a device
 * to become ready.
 */
#define HPSA_TUR_RETRY_LIMIT (20)
#define HPSA_MAX_WAIT_INTERVAL_SECS (30)

/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
 * to become ready, in seconds, before giving up on it.
 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
 * between polling the board to see if it is ready, in
 * milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and
 * HPSA_BOARD_READY_ITERATIONS are derived from those.
 */
#define HPSA_BOARD_READY_WAIT_SECS (120)
#define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_BOARD_NOT_READY_ITERATIONS \
	((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)
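
/*
 * Worked example of the derived values above: with a 120 second ready
 * wait and a 100 ms poll interval, HPSA_BOARD_READY_ITERATIONS is
 * (120 * 1000) / 100 = 1200 polls, and HPSA_BOARD_NOT_READY_ITERATIONS
 * is (100 * 1000) / 100 = 1000 polls.  HPSA_BOARD_READY_POLL_INTERVAL
 * is the same 100 ms interval expressed in jiffies.
 */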

/* Defining the different access methods */
/*
 * Memory mapped FIFO interface (SMART 53xx cards)
 */
#define SA5_DOORBELL	0x20
#define SA5_REQUEST_PORT_OFFSET	0x40
#define SA5_REPLY_INTR_MASK_OFFSET	0x34
#define SA5_REPLY_PORT_OFFSET		0x44
#define SA5_INTR_STATUS		0x30
#define SA5_SCRATCHPAD_OFFSET	0xB0

#define SA5_CTCFG_OFFSET	0xB4
#define SA5_CTMEM_OFFSET	0xB8

#define SA5_INTR_OFF		0x08
#define SA5B_INTR_OFF		0x04
#define SA5_INTR_PENDING	0x08
#define SA5B_INTR_PENDING	0x04
#define FIFO_EMPTY		0xffffffff
#define HPSA_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */

#define HPSA_ERROR_BIT		0x02

/* Performant mode flags */
#define SA5_PERF_INTR_PENDING	0x04
#define SA5_PERF_INTR_OFF	0x05
#define SA5_OUTDB_STATUS_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR		0xA0
#define SA5_OUTDB_STATUS	0x9C

#define HPSA_INTR_ON	1
#define HPSA_INTR_OFF	0

/*
 * Inbound Post Queue offsets for IO Accelerator Mode 2
 */
#define IOACCEL2_INBOUND_POSTQ_32	0x48
#define IOACCEL2_INBOUND_POSTQ_64_LOW	0xd0
#define IOACCEL2_INBOUND_POSTQ_64_HI	0xd4

/*
 * Send the command to the hardware
 */
static void SA5_submit_command(struct ctlr_info *h,
	struct CommandList *c)
{
	dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
		c->Header.Tag.lower);
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}

static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
	struct CommandList *c)
{
	dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
		c->Header.Tag.lower);
	if (c->cmd_type == CMD_IOACCEL2)
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
	else
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}
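
/*
 * Usage note (illustrative, not in the original header): these submit
 * routines are not called directly; commands are dispatched through the
 * per-board access_method table, e.g.
 *
 *	h->access.submit_command(h, c);
 *
 * so SA5_submit_command_ioaccel2() is only reached on boards wired up
 * with SA5_ioaccel_mode2_access (see the tables near the end of this
 * file).
 */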

/*
 * This card is the opposite of the other cards.
 * 0 turns interrupts on...
 * 0x08 turns them off...
 */
static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* turn on interrupts */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else {
		h->interrupts_enabled = 0;
		writel(SA5_PERF_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
{
	struct reply_pool *rq = &h->reply_queue[q];
	unsigned long flags, register_value = FIFO_EMPTY;

	/* msi auto clears the interrupt pending bit. */
	if (!(h->msi_vector || h->msix_vector)) {
		/* flush the controller write of the reply queue by reading
		 * outbound doorbell status register.
		 */
		register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
		/* Do a read in order to flush the write to the controller
		 * (as per spec.)
		 */
		register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
	}

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		register_value = rq->head[rq->current_entry];
		rq->current_entry++;
		spin_lock_irqsave(&h->lock, flags);
		h->commands_outstanding--;
		spin_unlock_irqrestore(&h->lock, flags);
	} else {
		register_value = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return register_value;
}

/*
 * Returns 1 if the fifo is full, 0 otherwise.
 */
static unsigned long SA5_fifo_full(struct ctlr_info *h)
{
	return h->commands_outstanding >= h->max_commands;
}

/*
 * returns value read from hardware.
 *     returns FIFO_EMPTY if there is nothing to read
 */
static unsigned long SA5_completed(struct ctlr_info *h,
	__attribute__((unused)) u8 q)
{
	unsigned long register_value
		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
	unsigned long flags;

	if (register_value != FIFO_EMPTY) {
		spin_lock_irqsave(&h->lock, flags);
		h->commands_outstanding--;
		spin_unlock_irqrestore(&h->lock, flags);
	}

#ifdef HPSA_DEBUG
	if (register_value != FIFO_EMPTY)
		dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
			register_value);
	else
		dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
#endif

	return register_value;
}
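
/*
 * Note on the wraparound scheme in SA5_performant_completed() above
 * (explanatory comment, not in the original header): each reply the
 * controller posts carries a parity bit in bit 0, and rq->wraparound
 * tracks the parity the consumer expects for the current pass through
 * the ring; it flips each time current_entry wraps to 0.  A stale entry
 * left over from the previous pass therefore fails the
 *
 *	(rq->head[rq->current_entry] & 1) == rq->wraparound
 *
 * test and the queue reads as empty, without any head/tail index
 * exchange with the hardware.
 */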

/*
 * Returns true if an interrupt is pending.
 */
static bool SA5_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value =
		readl(h->vaddr + SA5_INTR_STATUS);
	dev_dbg(&h->pdev->dev, "intr_pending %lx\n", register_value);
	return register_value & SA5_INTR_PENDING;
}

static bool SA5_performant_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	if (!register_value)
		return false;

	if (h->msi_vector || h->msix_vector)
		return true;

	/* Read outbound doorbell to flush */
	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
}

#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT 0x100

static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
		true : false;
}

#define IOACCEL_MODE1_REPLY_QUEUE_INDEX  0x1A0
#define IOACCEL_MODE1_PRODUCER_INDEX     0x1B8
#define IOACCEL_MODE1_CONSUMER_INDEX     0x1BC
#define IOACCEL_MODE1_REPLY_UNUSED       0xFFFFFFFFFFFFFFFFULL

static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
{
	u64 register_value;
	struct reply_pool *rq = &h->reply_queue[q];
	unsigned long flags;

	BUG_ON(q >= h->nreply_queues);

	register_value = rq->head[rq->current_entry];
	if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
		rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
		if (++rq->current_entry == rq->size)
			rq->current_entry = 0;
		/*
		 * @todo
		 *
		 * Don't really need to write the new index after each command,
		 * but with current driver design this is easiest.
		 */
		wmb();
		writel((q << 24) | rq->current_entry, h->vaddr +
				IOACCEL_MODE1_CONSUMER_INDEX);
		spin_lock_irqsave(&h->lock, flags);
		h->commands_outstanding--;
		spin_unlock_irqrestore(&h->lock, flags);
	}
	return (unsigned long) register_value;
}

static struct access_method SA5_access = {
	SA5_submit_command,
	SA5_intr_mask,
	SA5_fifo_full,
	SA5_intr_pending,
	SA5_completed,
};

static struct access_method SA5_ioaccel_mode1_access = {
	SA5_submit_command,
	SA5_performant_intr_mask,
	SA5_fifo_full,
	SA5_ioaccel_mode1_intr_pending,
	SA5_ioaccel_mode1_completed,
};

static struct access_method SA5_ioaccel_mode2_access = {
	SA5_submit_command_ioaccel2,
	SA5_performant_intr_mask,
	SA5_fifo_full,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};

static struct access_method SA5_performant_access = {
	SA5_submit_command,
	SA5_performant_intr_mask,
	SA5_fifo_full,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};

struct board_type {
	u32	board_id;
	char	*product_name;
	struct access_method *access;
};

#endif /* HPSA_H */