xref: /openbmc/linux/drivers/scsi/hpsa.h (revision 93df8a1e)
/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */
#ifndef HPSA_H
#define HPSA_H

#include <scsi/scsicam.h>

#define IO_OK		0
#define IO_ERROR	1

struct ctlr_info;

struct access_method {
	void (*submit_command)(struct ctlr_info *h,
		struct CommandList *c);
	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
	bool (*intr_pending)(struct ctlr_info *h);
	unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
};
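
/*
 * Illustrative sketch (assumed usage, not code in this header): hpsa.c
 * dispatches through a board's access_method table so one code path can
 * drive every controller generation, along the lines of:
 *
 *	h->access.submit_command(h, c);
 *	if (h->access.intr_pending(h))
 *		tag = h->access.command_completed(h, q);
 */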

struct hpsa_scsi_dev_t {
	int devtype;
	int bus, target, lun;		/* as presented to the OS */
	unsigned char scsi3addr[8];	/* as presented to the HW */
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
	unsigned char device_id[16];    /* from inquiry pg. 0x83 */
	unsigned char vendor[8];        /* bytes 8-15 of inquiry data */
	unsigned char model[16];        /* bytes 16-31 of inquiry data */
	unsigned char raid_level;	/* from inquiry page 0xC1 */
	unsigned char volume_offline;	/* discovered via TUR or VPD */
	u16 queue_depth;		/* max queue_depth for this device */
	atomic_t reset_cmds_out;	/* count of commands a pending
					 * reset will affect */
	atomic_t ioaccel_cmds_out;	/* Only used for physical devices;
					 * counts commands sent to the
					 * physical device via the
					 * "ioaccel" path.
					 */
	u32 ioaccel_handle;
	int offload_config;		/* I/O accel RAID offload configured */
	int offload_enabled;		/* I/O accel RAID offload enabled */
	int offload_to_be_enabled;
	int hba_ioaccel_enabled;
	int offload_to_mirror;		/* Send next I/O accelerator RAID
					 * offload request to mirror drive
					 */
	struct raid_map_data raid_map;	/* I/O accelerator RAID map */

	/*
	 * Pointers from logical drive map indices to the phys drives that
	 * make up those logical drives.  Note, multiple logical drives may
	 * share physical drives.  You can have, for instance, 5 physical
	 * drives with 3 logical drives each using those same 5 physical
	 * disks.  We need these pointers for counting I/Os out to physical
	 * devices in order to honor physical device queue depth limits
	 * (see the sketch just after this struct).
	 */
	struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES];
	int nphysical_disks;
	int supports_aborts;
#define HPSA_DO_NOT_EXPOSE	0x0
#define HPSA_SG_ATTACH		0x1
#define HPSA_ULD_ATTACH		0x2
#define HPSA_SCSI_ADD		(HPSA_SG_ATTACH | HPSA_ULD_ATTACH)
	u8 expose_state;
};
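
/*
 * Hedged sketch of how phys_disk[] supports per-physical-drive queue
 * depth accounting on the ioaccel path (it mirrors the pattern used in
 * hpsa.c, but the exact control flow here is an assumption):
 *
 *	phys_disk = dev->phys_disk[map_index];
 *	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
 *			phys_disk->queue_depth) {
 *		atomic_dec(&phys_disk->ioaccel_cmds_out);
 *		return IO_ACCEL_INELIGIBLE;
 *	}
 *
 * where a return of IO_ACCEL_INELIGIBLE makes the command fall back to
 * the normal RAID path.
 */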

struct reply_queue_buffer {
	u64 *head;		/* kernel virtual address of the ring */
	size_t size;
	u8 wraparound;		/* consumer's current phase bit */
	u32 current_entry;	/* consumer index into the ring */
	dma_addr_t busaddr;	/* DMA address handed to the controller */
};

#pragma pack(1)
struct bmic_controller_parameters {
	u8   led_flags;
	u8   enable_command_list_verification;
	u8   backed_out_write_drives;
	u16  stripes_for_parity;
	u8   parity_distribution_mode_flags;
	u16  max_driver_requests;
	u16  elevator_trend_count;
	u8   disable_elevator;
	u8   force_scan_complete;
	u8   scsi_transfer_mode;
	u8   force_narrow;
	u8   rebuild_priority;
	u8   expand_priority;
	u8   host_sdb_asic_fix;
	u8   pdpi_burst_from_host_disabled;
	char software_name[64];
	char hardware_name[32];
	u8   bridge_revision;
	u8   snapshot_priority;
	u32  os_specific;
	u8   post_prompt_timeout;
	u8   automatic_drive_slamming;
	u8   reserved1;
	u8   nvram_flags;
#define HBA_MODE_ENABLED_FLAG (1 << 3)
	u8   cache_nvram_flags;
	u8   drive_config_flags;
	u16  reserved2;
	u8   temp_warning_level;
	u8   temp_shutdown_level;
	u8   temp_condition_reset;
	u8   max_coalesce_commands;
	u32  max_coalesce_delay;
	u8   orca_password[4];
	u8   access_id[16];
	u8   reserved[356];
};
#pragma pack()
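
/*
 * The structure above is packed because it must match the BMIC
 * firmware's on-the-wire layout byte for byte; compiler-inserted
 * padding would shift every later field.
 */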

struct ctlr_info {
	int	ctlr;
	char	devname[8];
	char    *product_name;
	struct pci_dev *pdev;
	u32	board_id;
	void __iomem *vaddr;
	unsigned long paddr;
	int	nr_cmds; /* Number of commands allowed on this controller */
#define HPSA_CMDS_RESERVED_FOR_ABORTS 2
#define HPSA_CMDS_RESERVED_FOR_DRIVER 1
	struct CfgTable __iomem *cfgtable;
	int	interrupts_enabled;
	int	max_commands;
	atomic_t commands_outstanding;
#	define PERF_MODE_INT	0
#	define DOORBELL_INT	1
#	define SIMPLE_MODE_INT	2
#	define MEMQ_MODE_INT	3
	unsigned int intr[MAX_REPLY_QUEUES];
	unsigned int msix_vector;
	unsigned int msi_vector;
	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
	struct access_method access;
	char hba_mode_enabled;

	/* Queue and queue info */
	unsigned int Qdepth;
	unsigned int maxSG;
	spinlock_t lock;
	int maxsgentries;
	u8 max_cmd_sg_entries;
	int chainsize;
	struct SGDescriptor **cmd_sg_list;
	struct ioaccel2_sg_element **ioaccel2_cmd_sg_list;

	/* pointers to command and error info pool */
	struct CommandList 	*cmd_pool;
	dma_addr_t		cmd_pool_dhandle;
	struct io_accel1_cmd	*ioaccel_cmd_pool;
	dma_addr_t		ioaccel_cmd_pool_dhandle;
	struct io_accel2_cmd	*ioaccel2_cmd_pool;
	dma_addr_t		ioaccel2_cmd_pool_dhandle;
	struct ErrorInfo 	*errinfo_pool;
	dma_addr_t		errinfo_pool_dhandle;
	unsigned long  		*cmd_pool_bits;
	int			scan_finished;
	spinlock_t		scan_lock;
	wait_queue_head_t	scan_wait_queue;

	struct Scsi_Host *scsi_host;
	spinlock_t devlock; /* to protect hba[ctlr]->dev[];  */
	int ndevices; /* number of used elements in .dev[] array. */
	struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
	/*
	 * Performant mode tables.
	 */
	u32 trans_support;
	u32 trans_offset;
	struct TransTable_struct __iomem *transtable;
	unsigned long transMethod;

	/* cap concurrent passthrus at some reasonable maximum */
#define HPSA_MAX_CONCURRENT_PASSTHRUS (10)
	atomic_t passthru_cmds_avail;

	/*
	 * Performant mode completion buffers
	 */
	size_t reply_queue_size;
	struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
	u8 nreply_queues;
	u32 *blockFetchTable;
	u32 *ioaccel1_blockFetchTable;
	u32 *ioaccel2_blockFetchTable;
	u32 __iomem *ioaccel2_bft2_regs;
	unsigned char *hba_inquiry_data;
	u32 driver_support;
	u32 fw_support;
	int ioaccel_support;
	int ioaccel_maxsg;
	u64 last_intr_timestamp;
	u32 last_heartbeat;
	u64 last_heartbeat_timestamp;
	u32 heartbeat_sample_interval;
	atomic_t firmware_flash_in_progress;
	u32 __percpu *lockup_detected;
	struct delayed_work monitor_ctlr_work;
	struct delayed_work rescan_ctlr_work;
	int remove_in_progress;
	/* Address of h->q[x] is passed to intr handler to know which queue */
	u8 q[MAX_REPLY_QUEUES];
	char intrname[MAX_REPLY_QUEUES][16];	/* "hpsa0-msix00" names */
	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
#define HPSATMF_BITS_SUPPORTED  (1 << 0)
#define HPSATMF_PHYS_LUN_RESET  (1 << 1)
#define HPSATMF_PHYS_NEX_RESET  (1 << 2)
#define HPSATMF_PHYS_TASK_ABORT (1 << 3)
#define HPSATMF_PHYS_TSET_ABORT (1 << 4)
#define HPSATMF_PHYS_CLEAR_ACA  (1 << 5)
#define HPSATMF_PHYS_CLEAR_TSET (1 << 6)
#define HPSATMF_PHYS_QRY_TASK   (1 << 7)
#define HPSATMF_PHYS_QRY_TSET   (1 << 8)
#define HPSATMF_PHYS_QRY_ASYNC  (1 << 9)
#define HPSATMF_IOACCEL_ENABLED (1 << 15)
#define HPSATMF_MASK_SUPPORTED  (1 << 16)
#define HPSATMF_LOG_LUN_RESET   (1 << 17)
#define HPSATMF_LOG_NEX_RESET   (1 << 18)
#define HPSATMF_LOG_TASK_ABORT  (1 << 19)
#define HPSATMF_LOG_TSET_ABORT  (1 << 20)
#define HPSATMF_LOG_CLEAR_ACA   (1 << 21)
#define HPSATMF_LOG_CLEAR_TSET  (1 << 22)
#define HPSATMF_LOG_QRY_TASK    (1 << 23)
#define HPSATMF_LOG_QRY_TSET    (1 << 24)
#define HPSATMF_LOG_QRY_ASYNC   (1 << 25)
	u32 events;
#define CTLR_STATE_CHANGE_EVENT				(1 << 0)
#define CTLR_ENCLOSURE_HOT_PLUG_EVENT			(1 << 1)
#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV		(1 << 4)
#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV		(1 << 5)
#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL		(1 << 6)
#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED	(1 << 30)
#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE	(1 << 31)

#define RESCAN_REQUIRED_EVENT_BITS \
		(CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
		CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
		CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
	spinlock_t offline_device_lock;
	struct list_head offline_device_list;
	int	acciopath_status;
	int	raid_offload_debug;
	int	needs_abort_tags_swizzled;
	struct workqueue_struct *resubmit_wq;
	struct workqueue_struct *rescan_ctlr_wq;
	atomic_t abort_cmds_available;
	wait_queue_head_t abort_cmd_wait_queue;
	wait_queue_head_t event_sync_wait_queue;
	struct mutex reset_mutex;
};
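
/*
 * Illustrative sketch (assumed shape, not code in this header): the
 * periodic rescan worker consults h->events and only rescans when one
 * of the RESCAN_REQUIRED_EVENT_BITS is set, roughly:
 *
 *	if (h->events & RESCAN_REQUIRED_EVENT_BITS)
 *		hpsa_scan_start(h->scsi_host);
 */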

struct offline_device_entry {
	unsigned char scsi3addr[8];
	struct list_head offline_list;
};

#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
#define HPSA_RESET_TYPE_CONTROLLER 0x00
#define HPSA_RESET_TYPE_BUS 0x01
#define HPSA_RESET_TYPE_TARGET 0x03
#define HPSA_RESET_TYPE_LUN 0x04
#define HPSA_MSG_SEND_RETRY_LIMIT 10
#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)

/* Maximum time in seconds the driver will wait for command completions
 * when polling before giving up.
 */
#define HPSA_MAX_POLL_TIME_SECS (20)

/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
 * how many times to retry TEST UNIT READY on a device
 * while waiting for it to become ready before giving up.
 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
 * between sending TURs while waiting for a device
 * to become ready.
 */
#define HPSA_TUR_RETRY_LIMIT (20)
#define HPSA_MAX_WAIT_INTERVAL_SECS (30)

/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
 * to become ready, in seconds, before giving up on it.
 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
 * between polling the board to see if it is ready, in
 * milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and
 * HPSA_BOARD_READY_ITERATIONS are derived from those.
 */
#define HPSA_BOARD_READY_WAIT_SECS (120)
#define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_BOARD_NOT_READY_ITERATIONS \
	((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
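
/*
 * Worked example with the values above: polling every 100 ms for up to
 * 120 s gives HPSA_BOARD_READY_ITERATIONS = (120 * 1000) / 100 = 1200
 * polls, and HPSA_BOARD_NOT_READY_ITERATIONS = (100 * 1000) / 100 =
 * 1000 polls.
 */
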
#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)

/* Defining the different access methods */
/*
 * Memory mapped FIFO interface (SMART 53xx cards)
 */
#define SA5_DOORBELL	0x20
#define SA5_REQUEST_PORT_OFFSET	0x40
#define SA5_REQUEST_PORT64_LO_OFFSET 0xC0
#define SA5_REQUEST_PORT64_HI_OFFSET 0xC4
#define SA5_REPLY_INTR_MASK_OFFSET	0x34
#define SA5_REPLY_PORT_OFFSET		0x44
#define SA5_INTR_STATUS		0x30
#define SA5_SCRATCHPAD_OFFSET	0xB0

#define SA5_CTCFG_OFFSET	0xB4
#define SA5_CTMEM_OFFSET	0xB8

#define SA5_INTR_OFF		0x08
#define SA5B_INTR_OFF		0x04
#define SA5_INTR_PENDING	0x08
#define SA5B_INTR_PENDING	0x04
#define FIFO_EMPTY		0xffffffff
#define HPSA_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */
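
/*
 * Hedged sketch of board-ready polling against the scratchpad register
 * (constants come from this header; the loop shape is an assumption):
 *
 *	for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
 *		if (readl(vaddr + SA5_SCRATCHPAD_OFFSET) ==
 *				HPSA_FIRMWARE_READY)
 *			break;
 *		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
 *	}
 */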

#define HPSA_ERROR_BIT		0x02

/* Performant mode flags */
#define SA5_PERF_INTR_PENDING   0x04
#define SA5_PERF_INTR_OFF       0x05
#define SA5_OUTDB_STATUS_PERF_BIT       0x01
#define SA5_OUTDB_CLEAR_PERF_BIT        0x01
#define SA5_OUTDB_CLEAR         0xA0
#define SA5_OUTDB_STATUS        0x9C


#define HPSA_INTR_ON	1
#define HPSA_INTR_OFF	0

/*
 * Inbound Post Queue offsets for IO Accelerator Mode 2
 */
#define IOACCEL2_INBOUND_POSTQ_32	0x48
#define IOACCEL2_INBOUND_POSTQ_64_LOW	0xd0
#define IOACCEL2_INBOUND_POSTQ_64_HI	0xd4

/*
 * Send the command to the hardware.  The read back of the scratchpad
 * register flushes the posted write out to the controller.
 */
static void SA5_submit_command(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}

/* Variant without the flushing read back, for paths where the extra
 * MMIO read is not needed.
 */
static void SA5_submit_command_no_read(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}

static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}

/*
 *  This card is the opposite of the other cards.
 *   0 turns interrupts on...
 *   0x08 turns them off...
 */
static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* turn on interrupts */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else {
		h->interrupts_enabled = 0;
		writel(SA5_PERF_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
{
	struct reply_queue_buffer *rq = &h->reply_queue[q];
	unsigned long register_value = FIFO_EMPTY;

	/* msi auto clears the interrupt pending bit. */
	if (unlikely(!(h->msi_vector || h->msix_vector))) {
		/* flush the controller write of the reply queue by reading
		 * outbound doorbell status register.
		 */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
		/* Do a read in order to flush the write to the controller
		 * (as per spec.)
		 */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
	}

	/* The low bit of each entry is a phase bit: it matches
	 * rq->wraparound only for entries the controller has written
	 * during the current pass around the ring.
	 */
	if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) {
		register_value = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		register_value = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return register_value;
}

/*
 * Returns the value read from the hardware, or FIFO_EMPTY if there is
 * nothing to read.
 */
static unsigned long SA5_completed(struct ctlr_info *h,
	__attribute__((unused)) u8 q)
{
	unsigned long register_value
		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);

	if (register_value != FIFO_EMPTY)
		atomic_dec(&h->commands_outstanding);

#ifdef HPSA_DEBUG
	if (register_value != FIFO_EMPTY)
		dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
			register_value);
	else
		dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
#endif

	return register_value;
}
/*
 *	Returns true if an interrupt is pending.
 */
static bool SA5_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value  =
		readl(h->vaddr + SA5_INTR_STATUS);
	return register_value & SA5_INTR_PENDING;
}

static bool SA5_performant_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	if (!register_value)
		return false;

	/* Read outbound doorbell to flush */
	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
}

#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT    0x100

static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	return register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT;
}

#define IOACCEL_MODE1_REPLY_QUEUE_INDEX  0x1A0
#define IOACCEL_MODE1_PRODUCER_INDEX     0x1B8
#define IOACCEL_MODE1_CONSUMER_INDEX     0x1BC
#define IOACCEL_MODE1_REPLY_UNUSED       0xFFFFFFFFFFFFFFFFULL

static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
{
	u64 register_value;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	BUG_ON(q >= h->nreply_queues);

	register_value = rq->head[rq->current_entry];
	if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
		rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
		if (++rq->current_entry == rq->size)
			rq->current_entry = 0;
		/*
		 * @todo
		 *
		 * Don't really need to write the new index after each command,
		 * but with current driver design this is easiest.
		 */
		wmb();
		/* Bits 31:24 select the reply queue; the low bits carry
		 * the new consumer index.
		 */
		writel((q << 24) | rq->current_entry, h->vaddr +
				IOACCEL_MODE1_CONSUMER_INDEX);
		atomic_dec(&h->commands_outstanding);
	}
	return (unsigned long) register_value;
}

static struct access_method SA5_access = {
	SA5_submit_command,
	SA5_intr_mask,
	SA5_intr_pending,
	SA5_completed,
};

static struct access_method SA5_ioaccel_mode1_access = {
	SA5_submit_command,
	SA5_performant_intr_mask,
	SA5_ioaccel_mode1_intr_pending,
	SA5_ioaccel_mode1_completed,
};

static struct access_method SA5_ioaccel_mode2_access = {
	SA5_submit_command_ioaccel2,
	SA5_performant_intr_mask,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};

static struct access_method SA5_performant_access = {
	SA5_submit_command,
	SA5_performant_intr_mask,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};

static struct access_method SA5_performant_access_no_read = {
	SA5_submit_command_no_read,
	SA5_performant_intr_mask,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};

struct board_type {
	u32	board_id;
	char	*product_name;
	struct access_method *access;
};
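
/*
 * Illustrative sketch (the specific entry is an assumption): hpsa.c
 * pairs PCI board ids with a product name and an access_method table
 * in an array of these, roughly:
 *
 *	static struct board_type products[] = {
 *		{ 0x3241103C, "Smart Array P212", &SA5_access },
 *		...
 *	};
 */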

#endif /* HPSA_H */