/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2016 Microsemi Corporation
 *    Copyright 2014-2015 PMC-Sierra, Inc.
 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */
#ifndef HPSA_H
#define HPSA_H

#include <scsi/scsicam.h>

#define IO_OK		0
#define IO_ERROR	1

struct ctlr_info;

struct access_method {
	void (*submit_command)(struct ctlr_info *h,
		struct CommandList *c);
	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
	bool (*intr_pending)(struct ctlr_info *h);
	unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
};

/* for SAS hosts and SAS expanders */
struct hpsa_sas_node {
	struct device *parent_dev;
	struct list_head port_list_head;
};

struct hpsa_sas_port {
	struct list_head port_list_entry;
	u64 sas_address;
	struct sas_port *port;
	int next_phy_index;
	struct list_head phy_list_head;
	struct hpsa_sas_node *parent_node;
	struct sas_rphy *rphy;
};

struct hpsa_sas_phy {
	struct list_head phy_list_entry;
	struct sas_phy *phy;
	struct hpsa_sas_port *parent_port;
	bool added_to_port;
};

struct hpsa_scsi_dev_t {
	unsigned int devtype;
	int bus, target, lun;		/* as presented to the OS */
	unsigned char scsi3addr[8];	/* as presented to the HW */
	u8 physical_device : 1;
	u8 expose_device;
	u8 removed : 1;			/* device is marked for death */
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
	unsigned char device_id[16];	/* from inquiry pg. 0x83 */
	u64 sas_address;
	unsigned char vendor[8];	/* bytes 8-15 of inquiry data */
	unsigned char model[16];	/* bytes 16-31 of inquiry data */
	unsigned char rev;		/* byte 2 of inquiry data */
	unsigned char raid_level;	/* from inquiry page 0xC1 */
	unsigned char volume_offline;	/* discovered via TUR or VPD */
	u16 queue_depth;		/* max queue_depth for this device */
	atomic_t reset_cmds_out;	/* count of commands to be affected */
	atomic_t ioaccel_cmds_out;	/* Only used for physical devices:
					 * counts commands sent to the physical
					 * device via the "ioaccel" path.
					 */
	u32 ioaccel_handle;
	u8 active_path_index;
	u8 path_map;
	u8 bay;
	u8 box[8];
	u16 phys_connector[8];
	int offload_config;		/* I/O accel RAID offload configured */
	int offload_enabled;		/* I/O accel RAID offload enabled */
	int offload_to_be_enabled;
	int hba_ioaccel_enabled;
	int offload_to_mirror;		/* Send next I/O accelerator RAID
					 * offload request to mirror drive.
					 */
	struct raid_map_data raid_map;	/* I/O accelerator RAID map */

	/*
	 * Pointers from logical drive map indices to the physical drives
	 * that make up those logical drives.  Note that multiple logical
	 * drives may share physical drives: for instance, 3 logical drives
	 * may each be built from the same 5 physical disks.  We need these
	 * pointers to count the I/Os outstanding to physical devices in
	 * order to honor the physical device queue depth limits.
	 */
	struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES];
	int nphysical_disks;
	int supports_aborts;
	struct hpsa_sas_port *sas_port;
	int external;	/* 1: from external array, 0: not, <0: unknown */
};
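/*
 * Illustrative sketch (not part of the driver): how the phys_disk[]
 * map and per-device queue_depth above might be used to gate "ioaccel"
 * submissions.  The helper name is hypothetical; the real accounting
 * lives in hpsa.c.
 */
#if 0	/* example only, not compiled */
static bool hpsa_example_phys_disk_busy(struct hpsa_scsi_dev_t *logical_drive,
	int map_index)
{
	struct hpsa_scsi_dev_t *phys_disk =
		logical_drive->phys_disk[map_index];

	/*
	 * Reserve a slot; if the disk is already at its queue depth,
	 * undo the reservation so the caller can fall back to the
	 * normal (non-ioaccel) path.
	 */
	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
			phys_disk->queue_depth) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return true;
	}
	return false;
}
#endif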
struct reply_queue_buffer {
	u64 *head;
	size_t size;
	u8 wraparound;
	u32 current_entry;
	dma_addr_t busaddr;
};

#pragma pack(1)
struct bmic_controller_parameters {
	u8 led_flags;
	u8 enable_command_list_verification;
	u8 backed_out_write_drives;
	u16 stripes_for_parity;
	u8 parity_distribution_mode_flags;
	u16 max_driver_requests;
	u16 elevator_trend_count;
	u8 disable_elevator;
	u8 force_scan_complete;
	u8 scsi_transfer_mode;
	u8 force_narrow;
	u8 rebuild_priority;
	u8 expand_priority;
	u8 host_sdb_asic_fix;
	u8 pdpi_burst_from_host_disabled;
	char software_name[64];
	char hardware_name[32];
	u8 bridge_revision;
	u8 snapshot_priority;
	u32 os_specific;
	u8 post_prompt_timeout;
	u8 automatic_drive_slamming;
	u8 reserved1;
	u8 nvram_flags;
	u8 cache_nvram_flags;
	u8 drive_config_flags;
	u16 reserved2;
	u8 temp_warning_level;
	u8 temp_shutdown_level;
	u8 temp_condition_reset;
	u8 max_coalesce_commands;
	u32 max_coalesce_delay;
	u8 orca_password[4];
	u8 access_id[16];
	u8 reserved[356];
};
#pragma pack()
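/*
 * Illustrative sketch (not from the driver): bmic_controller_parameters
 * mirrors a firmware structure byte for byte, which is why it is built
 * under #pragma pack(1).  Summing the members above gives 512 bytes;
 * that expected size is an assumption, but a compile-time check like
 * this can catch accidental padding or layout drift.
 */
#if 0	/* example only, not compiled */
static inline void hpsa_example_check_bmic_layout(void)
{
	BUILD_BUG_ON(sizeof(struct bmic_controller_parameters) != 512);
}
#endif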
struct ctlr_info {
	int ctlr;
	char devname[8];
	char *product_name;
	struct pci_dev *pdev;
	u32 board_id;
	u64 sas_address;
	void __iomem *vaddr;
	unsigned long paddr;
	int nr_cmds; /* Number of commands allowed on this controller */
#define HPSA_CMDS_RESERVED_FOR_ABORTS 2
#define HPSA_CMDS_RESERVED_FOR_DRIVER 1
	struct CfgTable __iomem *cfgtable;
	int interrupts_enabled;
	int max_commands;
	atomic_t commands_outstanding;
#define PERF_MODE_INT	0
#define DOORBELL_INT	1
#define SIMPLE_MODE_INT	2
#define MEMQ_MODE_INT	3
	unsigned int intr[MAX_REPLY_QUEUES];
	unsigned int msix_vector;
	unsigned int msi_vector;
	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
	struct access_method access;

	/* queue and queue info */
	unsigned int Qdepth;
	unsigned int maxSG;
	spinlock_t lock;
	int maxsgentries;
	u8 max_cmd_sg_entries;
	int chainsize;
	struct SGDescriptor **cmd_sg_list;
	struct ioaccel2_sg_element **ioaccel2_cmd_sg_list;

	/* pointers to command and error info pool */
	struct CommandList *cmd_pool;
	dma_addr_t cmd_pool_dhandle;
	struct io_accel1_cmd *ioaccel_cmd_pool;
	dma_addr_t ioaccel_cmd_pool_dhandle;
	struct io_accel2_cmd *ioaccel2_cmd_pool;
	dma_addr_t ioaccel2_cmd_pool_dhandle;
	struct ErrorInfo *errinfo_pool;
	dma_addr_t errinfo_pool_dhandle;
	unsigned long *cmd_pool_bits;
	int scan_finished;
	spinlock_t scan_lock;
	wait_queue_head_t scan_wait_queue;

	struct Scsi_Host *scsi_host;
	spinlock_t devlock; /* to protect hba[ctlr]->dev[] */
	int ndevices; /* number of used elements in .dev[] array */
	struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
	/*
	 * Performant mode tables.
	 */
	u32 trans_support;
	u32 trans_offset;
	struct TransTable_struct __iomem *transtable;
	unsigned long transMethod;

	/* cap concurrent passthrus at some reasonable maximum */
#define HPSA_MAX_CONCURRENT_PASSTHRUS (10)
	atomic_t passthru_cmds_avail;

	/*
	 * Performant mode completion buffers
	 */
	size_t reply_queue_size;
	struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
	u8 nreply_queues;
	u32 *blockFetchTable;
	u32 *ioaccel1_blockFetchTable;
	u32 *ioaccel2_blockFetchTable;
	u32 __iomem *ioaccel2_bft2_regs;
	unsigned char *hba_inquiry_data;
	u32 driver_support;
	u32 fw_support;
	int ioaccel_support;
	int ioaccel_maxsg;
	u64 last_intr_timestamp;
	u32 last_heartbeat;
	u64 last_heartbeat_timestamp;
	u32 heartbeat_sample_interval;
	atomic_t firmware_flash_in_progress;
	u32 __percpu *lockup_detected;
	struct delayed_work monitor_ctlr_work;
	struct delayed_work rescan_ctlr_work;
	int remove_in_progress;
	/* Address of h->q[x] is passed to intr handler to know which queue */
	u8 q[MAX_REPLY_QUEUES];
	char intrname[MAX_REPLY_QUEUES][16]; /* "hpsa0-msix00" names */
	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
#define HPSATMF_BITS_SUPPORTED	(1 << 0)
#define HPSATMF_PHYS_LUN_RESET	(1 << 1)
#define HPSATMF_PHYS_NEX_RESET	(1 << 2)
#define HPSATMF_PHYS_TASK_ABORT	(1 << 3)
#define HPSATMF_PHYS_TSET_ABORT	(1 << 4)
#define HPSATMF_PHYS_CLEAR_ACA	(1 << 5)
#define HPSATMF_PHYS_CLEAR_TSET	(1 << 6)
#define HPSATMF_PHYS_QRY_TASK	(1 << 7)
#define HPSATMF_PHYS_QRY_TSET	(1 << 8)
#define HPSATMF_PHYS_QRY_ASYNC	(1 << 9)
#define HPSATMF_IOACCEL_ENABLED	(1 << 15)
#define HPSATMF_MASK_SUPPORTED	(1 << 16)
#define HPSATMF_LOG_LUN_RESET	(1 << 17)
#define HPSATMF_LOG_NEX_RESET	(1 << 18)
#define HPSATMF_LOG_TASK_ABORT	(1 << 19)
#define HPSATMF_LOG_TSET_ABORT	(1 << 20)
#define HPSATMF_LOG_CLEAR_ACA	(1 << 21)
#define HPSATMF_LOG_CLEAR_TSET	(1 << 22)
#define HPSATMF_LOG_QRY_TASK	(1 << 23)
#define HPSATMF_LOG_QRY_TSET	(1 << 24)
#define HPSATMF_LOG_QRY_ASYNC	(1 << 25)
	u32 events;
#define CTLR_STATE_CHANGE_EVENT				(1 << 0)
#define CTLR_ENCLOSURE_HOT_PLUG_EVENT			(1 << 1)
#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV		(1 << 4)
#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV		(1 << 5)
#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL		(1 << 6)
#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED	(1 << 30)
#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE	(1 << 31)

#define RESCAN_REQUIRED_EVENT_BITS \
		(CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
		CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
		CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
	spinlock_t offline_device_lock;
	struct list_head offline_device_list;
	int acciopath_status;
	int drv_req_rescan;
	int raid_offload_debug;
	int discovery_polling;
	struct ReportLUNdata *lastlogicals;
	int needs_abort_tags_swizzled;
	struct workqueue_struct *resubmit_wq;
	struct workqueue_struct *rescan_ctlr_wq;
	atomic_t abort_cmds_available;
	wait_queue_head_t abort_cmd_wait_queue;
	wait_queue_head_t event_sync_wait_queue;
	struct mutex reset_mutex;
	u8 reset_in_progress;
	struct hpsa_sas_node *sas_host;
};

struct offline_device_entry {
	unsigned char scsi3addr[8];
	struct list_head offline_list;
};
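/*
 * Illustrative sketch (hypothetical helper, not in the driver): the
 * controller event bits above are typically consumed by masking the
 * reported event word against RESCAN_REQUIRED_EVENT_BITS to decide
 * whether a rescan of the device topology is needed.
 */
#if 0	/* example only, not compiled */
static inline bool hpsa_example_rescan_required(struct ctlr_info *h)
{
	return (h->events & RESCAN_REQUIRED_EVENT_BITS) != 0;
}
#endif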
#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
#define HPSA_RESET_TYPE_CONTROLLER 0x00
#define HPSA_RESET_TYPE_BUS 0x01
#define HPSA_RESET_TYPE_LUN 0x04
#define HPSA_PHYS_TARGET_RESET 0x99 /* not defined by cciss spec */
#define HPSA_MSG_SEND_RETRY_LIMIT 10
#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)

/* Maximum time in seconds driver will wait for command completions
 * when polling before giving up.
 */
#define HPSA_MAX_POLL_TIME_SECS (20)

/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
 * how many times to retry TEST UNIT READY on a device
 * while waiting for it to become ready before giving up.
 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
 * between sending TURs while waiting for a device
 * to become ready.
 */
#define HPSA_TUR_RETRY_LIMIT (20)
#define HPSA_MAX_WAIT_INTERVAL_SECS (30)

/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
 * to become ready, in seconds, before giving up on it.
 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
 * between polls of the board to see if it is ready, in
 * milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and
 * HPSA_BOARD_READY_ITERATIONS are derived from those.
 */
#define HPSA_BOARD_READY_WAIT_SECS (120)
#define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_BOARD_NOT_READY_ITERATIONS \
	((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)

/* Definitions of the different access methods */

/*
 * Memory mapped FIFO interface (SMART 53xx cards)
 */
#define SA5_DOORBELL	0x20
#define SA5_REQUEST_PORT_OFFSET	0x40
#define SA5_REQUEST_PORT64_LO_OFFSET 0xC0
#define SA5_REQUEST_PORT64_HI_OFFSET 0xC4
#define SA5_REPLY_INTR_MASK_OFFSET	0x34
#define SA5_REPLY_PORT_OFFSET		0x44
#define SA5_INTR_STATUS		0x30
#define SA5_SCRATCHPAD_OFFSET	0xB0

#define SA5_CTCFG_OFFSET	0xB4
#define SA5_CTMEM_OFFSET	0xB8

#define SA5_INTR_OFF		0x08
#define SA5B_INTR_OFF		0x04
#define SA5_INTR_PENDING	0x08
#define SA5B_INTR_PENDING	0x04
#define FIFO_EMPTY		0xffffffff
#define HPSA_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */

#define HPSA_ERROR_BIT		0x02

/* Performant mode flags */
#define SA5_PERF_INTR_PENDING	0x04
#define SA5_PERF_INTR_OFF	0x05
#define SA5_OUTDB_STATUS_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR		0xA0
#define SA5_OUTDB_STATUS	0x9C


#define HPSA_INTR_ON	1
#define HPSA_INTR_OFF	0

/*
 * Inbound Post Queue offsets for IO Accelerator Mode 2
 */
#define IOACCEL2_INBOUND_POSTQ_32	0x48
#define IOACCEL2_INBOUND_POSTQ_64_LOW	0xd0
#define IOACCEL2_INBOUND_POSTQ_64_HI	0xd4

#define HPSA_PHYSICAL_DEVICE_BUS	0
#define HPSA_RAID_VOLUME_BUS		1
#define HPSA_EXTERNAL_RAID_VOLUME_BUS	2
#define HPSA_HBA_BUS			0
#define HPSA_LEGACY_HBA_BUS		3
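/*
 * Illustrative sketch (not the driver's actual wait loop): polling the
 * scratchpad register for HPSA_FIRMWARE_READY using the intervals and
 * iteration counts defined above.  With a 100 ms poll interval and a
 * 120 s limit, HPSA_BOARD_READY_ITERATIONS works out to
 * (120 * 1000) / 100 = 1200 polls.
 */
#if 0	/* example only, not compiled */
static int hpsa_example_wait_for_board_ready(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
		if (readl(h->vaddr + SA5_SCRATCHPAD_OFFSET) ==
				HPSA_FIRMWARE_READY)
			return 0;
		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	return -ENODEV;
}
#endif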
/*
 * Send the command to the hardware.
 */
static void SA5_submit_command(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}

static void SA5_submit_command_no_read(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}

static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}

/*
 * This card is the opposite of the other cards.
 *  0 turns interrupts on...
 *  0x08 turns them off...
 */
static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* turn on interrupts */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else {
		h->interrupts_enabled = 0;
		writel(SA5_PERF_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
{
	struct reply_queue_buffer *rq = &h->reply_queue[q];
	unsigned long register_value = FIFO_EMPTY;

	/* msi auto clears the interrupt pending bit. */
	if (unlikely(!(h->msi_vector || h->msix_vector))) {
		/* flush the controller write of the reply queue by reading
		 * outbound doorbell status register.
		 */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
		/* Do a read in order to flush the write to the controller
		 * (as per spec).
		 */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
	}

	if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) {
		register_value = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		register_value = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return register_value;
}

/*
 * Returns value read from hardware.
 * Returns FIFO_EMPTY if there is nothing to read.
 */
static unsigned long SA5_completed(struct ctlr_info *h,
	__attribute__((unused)) u8 q)
{
	unsigned long register_value
		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);

	if (register_value != FIFO_EMPTY)
		atomic_dec(&h->commands_outstanding);

#ifdef HPSA_DEBUG
	if (register_value != FIFO_EMPTY)
		dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
			register_value);
	else
		dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
#endif

	return register_value;
}
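/*
 * Illustrative sketch (hypothetical, not the driver's interrupt
 * handler): completions are normally drained through the access_method
 * vtable until the queue reports FIFO_EMPTY, so the same loop shape
 * serves both the legacy FIFO and the performant reply queues.
 */
#if 0	/* example only, not compiled */
static void hpsa_example_drain_completions(struct ctlr_info *h, u8 q)
{
	unsigned long tag;

	while ((tag = h->access.command_completed(h, q)) != FIFO_EMPTY) {
		/* look up and finish the command identified by tag */
	}
}
#endif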
/*
 * Returns true if an interrupt is pending.
 */
static bool SA5_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value =
		readl(h->vaddr + SA5_INTR_STATUS);
	return register_value & SA5_INTR_PENDING;
}

static bool SA5_performant_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	if (!register_value)
		return false;

	/* Read outbound doorbell to flush */
	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
}

#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT 0x100

static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
		true : false;
}

#define IOACCEL_MODE1_REPLY_QUEUE_INDEX  0x1A0
#define IOACCEL_MODE1_PRODUCER_INDEX     0x1B8
#define IOACCEL_MODE1_CONSUMER_INDEX     0x1BC
#define IOACCEL_MODE1_REPLY_UNUSED       0xFFFFFFFFFFFFFFFFULL

static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
{
	u64 register_value;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	BUG_ON(q >= h->nreply_queues);

	register_value = rq->head[rq->current_entry];
	if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
		rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
		if (++rq->current_entry == rq->size)
			rq->current_entry = 0;
		/*
		 * @todo
		 *
		 * Don't really need to write the new index after each command,
		 * but with current driver design this is easiest.
		 */
		wmb();
		writel((q << 24) | rq->current_entry, h->vaddr +
				IOACCEL_MODE1_CONSUMER_INDEX);
		atomic_dec(&h->commands_outstanding);
	}
	return (unsigned long) register_value;
}

static struct access_method SA5_access = {
	.submit_command =	SA5_submit_command,
	.set_intr_mask =	SA5_intr_mask,
	.intr_pending =		SA5_intr_pending,
	.command_completed =	SA5_completed,
};

static struct access_method SA5_ioaccel_mode1_access = {
	.submit_command =	SA5_submit_command,
	.set_intr_mask =	SA5_performant_intr_mask,
	.intr_pending =		SA5_ioaccel_mode1_intr_pending,
	.command_completed =	SA5_ioaccel_mode1_completed,
};

static struct access_method SA5_ioaccel_mode2_access = {
	.submit_command =	SA5_submit_command_ioaccel2,
	.set_intr_mask =	SA5_performant_intr_mask,
	.intr_pending =		SA5_performant_intr_pending,
	.command_completed =	SA5_performant_completed,
};

static struct access_method SA5_performant_access = {
	.submit_command =	SA5_submit_command,
	.set_intr_mask =	SA5_performant_intr_mask,
	.intr_pending =		SA5_performant_intr_pending,
	.command_completed =	SA5_performant_completed,
};

static struct access_method SA5_performant_access_no_read = {
	.submit_command =	SA5_submit_command_no_read,
	.set_intr_mask =	SA5_performant_intr_mask,
	.intr_pending =		SA5_performant_intr_pending,
	.command_completed =	SA5_performant_completed,
};

struct board_type {
	u32	board_id;
	char	*product_name;
	struct access_method *access;
};

#endif /* HPSA_H */