/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2016 Microsemi Corporation
 *    Copyright 2014-2015 PMC-Sierra, Inc.
 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */
#ifndef HPSA_H
#define HPSA_H

#include <scsi/scsicam.h>

#define IO_OK		0
#define IO_ERROR	1

struct ctlr_info;

struct access_method {
	void (*submit_command)(struct ctlr_info *h,
		struct CommandList *c);
	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
	bool (*intr_pending)(struct ctlr_info *h);
	unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
};
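
/*
 * Each supported controller family provides one of these ops tables
 * (see SA5_access and friends near the end of this file); the rest of
 * the driver dispatches through it, e.g. roughly
 * h->access.submit_command(h, c) to post a command.
 */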

/* for SAS hosts and SAS expanders */
struct hpsa_sas_node {
	struct device *parent_dev;
	struct list_head port_list_head;
};

struct hpsa_sas_port {
	struct list_head port_list_entry;
	u64 sas_address;
	struct sas_port *port;
	int next_phy_index;
	struct list_head phy_list_head;
	struct hpsa_sas_node *parent_node;
	struct sas_rphy *rphy;
};

struct hpsa_sas_phy {
	struct list_head phy_list_entry;
	struct sas_phy *phy;
	struct hpsa_sas_port *parent_port;
	bool added_to_port;
};
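
/*
 * Together these three structures describe the driver's view of the SAS
 * topology: an hpsa_sas_node (host or expander) owns a list of
 * hpsa_sas_ports, each of which owns a list of hpsa_sas_phys and wraps
 * the corresponding SAS transport class objects (sas_port, sas_phy,
 * sas_rphy).
 */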

#define EXTERNAL_QD 7
struct hpsa_scsi_dev_t {
	unsigned int devtype;
	int bus, target, lun;		/* as presented to the OS */
	unsigned char scsi3addr[8];	/* as presented to the HW */
	u8 physical_device : 1;
	u8 expose_device;
	u8 removed : 1;			/* device is marked for death */
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
	unsigned char device_id[16];    /* from inquiry pg. 0x83 */
	u64 sas_address;
	u64 eli;			/* from report diags. */
	unsigned char vendor[8];        /* bytes 8-15 of inquiry data */
	unsigned char model[16];        /* bytes 16-31 of inquiry data */
	unsigned char rev;		/* byte 2 of inquiry data */
	unsigned char raid_level;	/* from inquiry page 0xC1 */
	unsigned char volume_offline;	/* discovered via TUR or VPD */
	u16 queue_depth;		/* max queue_depth for this device */
	atomic_t reset_cmds_out;	/* Count of commands to be affected */
	atomic_t ioaccel_cmds_out;	/* Only used for physical devices;
					 * counts commands sent to the physical
					 * device via the "ioaccel" path.
					 */
	u32 ioaccel_handle;
	u8 active_path_index;
	u8 path_map;
	u8 bay;
	u8 box[8];
	u16 phys_connector[8];
	int offload_config;		/* I/O accel RAID offload configured */
	int offload_enabled;		/* I/O accel RAID offload enabled */
	int offload_to_be_enabled;
	int hba_ioaccel_enabled;
	int offload_to_mirror;		/* Send next I/O accelerator RAID
					 * offload request to mirror drive
					 */
	struct raid_map_data raid_map;	/* I/O accelerator RAID map */

	/*
	 * Pointers from logical drive map indices to the phys drives that
	 * make those logical drives.  Note, multiple logical drives may
	 * share physical drives.  You can have for instance 5 physical
	 * drives with 3 logical drives each using those same 5 physical
	 * disks. We need these pointers for counting i/o's out to physical
	 * devices in order to honor physical device queue depth limits.
	 */
	struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES];
	int nphysical_disks;
	int supports_aborts;
	struct hpsa_sas_port *sas_port;
	int external;   /* 1 = from external array, 0 = not, <0 = unknown */
};

struct reply_queue_buffer {
	u64 *head;
	size_t size;
	u8 wraparound;
	u32 current_entry;
	dma_addr_t busaddr;
};
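
/*
 * One DMA-able reply ring per reply queue: "head" is the kernel virtual
 * address of the ring, "busaddr" is the bus address handed to the
 * controller, and "current_entry"/"wraparound" track the consumer side
 * (see SA5_performant_completed() below for how entries are consumed).
 */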

#pragma pack(1)
struct bmic_controller_parameters {
	u8   led_flags;
	u8   enable_command_list_verification;
	u8   backed_out_write_drives;
	u16  stripes_for_parity;
	u8   parity_distribution_mode_flags;
	u16  max_driver_requests;
	u16  elevator_trend_count;
	u8   disable_elevator;
	u8   force_scan_complete;
	u8   scsi_transfer_mode;
	u8   force_narrow;
	u8   rebuild_priority;
	u8   expand_priority;
	u8   host_sdb_asic_fix;
	u8   pdpi_burst_from_host_disabled;
	char software_name[64];
	char hardware_name[32];
	u8   bridge_revision;
	u8   snapshot_priority;
	u32  os_specific;
	u8   post_prompt_timeout;
	u8   automatic_drive_slamming;
	u8   reserved1;
	u8   nvram_flags;
	u8   cache_nvram_flags;
	u8   drive_config_flags;
	u16  reserved2;
	u8   temp_warning_level;
	u8   temp_shutdown_level;
	u8   temp_condition_reset;
	u8   max_coalesce_commands;
	u32  max_coalesce_delay;
	u8   orca_password[4];
	u8   access_id[16];
	u8   reserved[356];
};
#pragma pack()
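
/*
 * The #pragma pack(1) above keeps bmic_controller_parameters free of
 * compiler padding, so its in-memory layout matches the byte-for-byte
 * format of the firmware's BMIC controller-parameters buffer.
 */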

struct ctlr_info {
	unsigned int *reply_map;
	int	ctlr;
	char	devname[8];
	char    *product_name;
	struct pci_dev *pdev;
	u32	board_id;
	u64	sas_address;
	void __iomem *vaddr;
	unsigned long paddr;
	int 	nr_cmds; /* Number of commands allowed on this controller */
#define HPSA_CMDS_RESERVED_FOR_ABORTS 2
#define HPSA_CMDS_RESERVED_FOR_DRIVER 1
	struct CfgTable __iomem *cfgtable;
	int	interrupts_enabled;
	int 	max_commands;
	atomic_t commands_outstanding;
#	define PERF_MODE_INT	0
#	define DOORBELL_INT	1
#	define SIMPLE_MODE_INT	2
#	define MEMQ_MODE_INT	3
	unsigned int msix_vectors;
	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
	struct access_method access;

	/* queue and queue Info */
	unsigned int Qdepth;
	unsigned int maxSG;
	spinlock_t lock;
	int maxsgentries;
	u8 max_cmd_sg_entries;
	int chainsize;
	struct SGDescriptor **cmd_sg_list;
	struct ioaccel2_sg_element **ioaccel2_cmd_sg_list;

	/* pointers to command and error info pool */
	struct CommandList 	*cmd_pool;
	dma_addr_t		cmd_pool_dhandle;
	struct io_accel1_cmd	*ioaccel_cmd_pool;
	dma_addr_t		ioaccel_cmd_pool_dhandle;
	struct io_accel2_cmd	*ioaccel2_cmd_pool;
	dma_addr_t		ioaccel2_cmd_pool_dhandle;
	struct ErrorInfo 	*errinfo_pool;
	dma_addr_t		errinfo_pool_dhandle;
	unsigned long  		*cmd_pool_bits;
	int			scan_finished;
	u8			scan_waiting : 1;
	spinlock_t		scan_lock;
	wait_queue_head_t	scan_wait_queue;

	struct Scsi_Host *scsi_host;
	spinlock_t devlock; /* to protect hba[ctlr]->dev[];  */
	int ndevices; /* number of used elements in .dev[] array. */
	struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
	/*
	 * Performant mode tables.
	 */
	u32 trans_support;
	u32 trans_offset;
	struct TransTable_struct __iomem *transtable;
	unsigned long transMethod;

	/* cap concurrent passthrus at some reasonable maximum */
#define HPSA_MAX_CONCURRENT_PASSTHRUS (10)
	atomic_t passthru_cmds_avail;

	/*
	 * Performant mode completion buffers
	 */
	size_t reply_queue_size;
	struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
	u8 nreply_queues;
	u32 *blockFetchTable;
	u32 *ioaccel1_blockFetchTable;
	u32 *ioaccel2_blockFetchTable;
	u32 __iomem *ioaccel2_bft2_regs;
	unsigned char *hba_inquiry_data;
	u32 driver_support;
	u32 fw_support;
	int ioaccel_support;
	int ioaccel_maxsg;
	u64 last_intr_timestamp;
	u32 last_heartbeat;
	u64 last_heartbeat_timestamp;
	u32 heartbeat_sample_interval;
	atomic_t firmware_flash_in_progress;
	u32 __percpu *lockup_detected;
	struct delayed_work monitor_ctlr_work;
	struct delayed_work rescan_ctlr_work;
	struct delayed_work event_monitor_work;
	int remove_in_progress;
	/* Address of h->q[x] is passed to intr handler to know which queue */
	u8 q[MAX_REPLY_QUEUES];
	char intrname[MAX_REPLY_QUEUES][16];	/* "hpsa0-msix00" names */
	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
#define HPSATMF_BITS_SUPPORTED  (1 << 0)
#define HPSATMF_PHYS_LUN_RESET  (1 << 1)
#define HPSATMF_PHYS_NEX_RESET  (1 << 2)
#define HPSATMF_PHYS_TASK_ABORT (1 << 3)
#define HPSATMF_PHYS_TSET_ABORT (1 << 4)
#define HPSATMF_PHYS_CLEAR_ACA  (1 << 5)
#define HPSATMF_PHYS_CLEAR_TSET (1 << 6)
#define HPSATMF_PHYS_QRY_TASK   (1 << 7)
#define HPSATMF_PHYS_QRY_TSET   (1 << 8)
#define HPSATMF_PHYS_QRY_ASYNC  (1 << 9)
#define HPSATMF_IOACCEL_ENABLED (1 << 15)
#define HPSATMF_MASK_SUPPORTED  (1 << 16)
#define HPSATMF_LOG_LUN_RESET   (1 << 17)
#define HPSATMF_LOG_NEX_RESET   (1 << 18)
#define HPSATMF_LOG_TASK_ABORT  (1 << 19)
#define HPSATMF_LOG_TSET_ABORT  (1 << 20)
#define HPSATMF_LOG_CLEAR_ACA   (1 << 21)
#define HPSATMF_LOG_CLEAR_TSET  (1 << 22)
#define HPSATMF_LOG_QRY_TASK    (1 << 23)
#define HPSATMF_LOG_QRY_TSET    (1 << 24)
#define HPSATMF_LOG_QRY_ASYNC   (1 << 25)
	u32 events;
#define CTLR_STATE_CHANGE_EVENT				(1 << 0)
#define CTLR_ENCLOSURE_HOT_PLUG_EVENT			(1 << 1)
#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV		(1 << 4)
#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV		(1 << 5)
#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL		(1 << 6)
#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED	(1 << 30)
#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE	(1 << 31)

#define RESCAN_REQUIRED_EVENT_BITS \
		(CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
		CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
		CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
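	/* hpsa.c is expected to trigger a rescan of the device topology
	 * when any of the event bits above are reported by the controller.
	 */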
	spinlock_t offline_device_lock;
	struct list_head offline_device_list;
	int	acciopath_status;
	int	drv_req_rescan;
	int	raid_offload_debug;
	int     discovery_polling;
	int     legacy_board;
	struct  ReportLUNdata *lastlogicals;
	int	needs_abort_tags_swizzled;
	struct workqueue_struct *resubmit_wq;
	struct workqueue_struct *rescan_ctlr_wq;
	atomic_t abort_cmds_available;
	wait_queue_head_t event_sync_wait_queue;
	struct mutex reset_mutex;
	u8 reset_in_progress;
	struct hpsa_sas_node *sas_host;
	spinlock_t reset_lock;
};

struct offline_device_entry {
	unsigned char scsi3addr[8];
	struct list_head offline_list;
};

#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
#define HPSA_RESET_TYPE_CONTROLLER 0x00
#define HPSA_RESET_TYPE_BUS 0x01
#define HPSA_RESET_TYPE_LUN 0x04
#define HPSA_PHYS_TARGET_RESET 0x99 /* not defined by cciss spec */
#define HPSA_MSG_SEND_RETRY_LIMIT 10
#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)

/* Maximum time in seconds driver will wait for command completions
 * when polling before giving up.
 */
#define HPSA_MAX_POLL_TIME_SECS (20)

/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
 * how many times to retry TEST UNIT READY on a device
 * while waiting for it to become ready before giving up.
 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
 * between sending TURs while waiting for a device
 * to become ready.
 */
#define HPSA_TUR_RETRY_LIMIT (20)
#define HPSA_MAX_WAIT_INTERVAL_SECS (30)

/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
 * to become ready, in seconds, before giving up on it.
 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
 * between polling the board to see if it is ready, in
 * milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and
 * HPSA_BOARD_READY_ITERATIONS are derived from those.
 */
#define HPSA_BOARD_READY_WAIT_SECS (120)
#define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_BOARD_NOT_READY_ITERATIONS \
	((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
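/*
 * With the values above, the poll interval works out to 100 ms (HZ/10
 * jiffies), so the "ready" loop polls 120000 / 100 = 1200 times and the
 * "not ready" loop 100000 / 100 = 1000 times.
 */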
#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)

/*  Defining the different access_methods */
/*
 * Memory mapped FIFO interface (SMART 53xx cards)
 */
#define SA5_DOORBELL	0x20
#define SA5_REQUEST_PORT_OFFSET	0x40
#define SA5_REQUEST_PORT64_LO_OFFSET 0xC0
#define SA5_REQUEST_PORT64_HI_OFFSET 0xC4
#define SA5_REPLY_INTR_MASK_OFFSET	0x34
#define SA5_REPLY_PORT_OFFSET		0x44
#define SA5_INTR_STATUS		0x30
#define SA5_SCRATCHPAD_OFFSET	0xB0

#define SA5_CTCFG_OFFSET	0xB4
#define SA5_CTMEM_OFFSET	0xB8

#define SA5_INTR_OFF		0x08
#define SA5B_INTR_OFF		0x04
#define SA5_INTR_PENDING	0x08
#define SA5B_INTR_PENDING	0x04
#define FIFO_EMPTY		0xffffffff
#define HPSA_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */

#define HPSA_ERROR_BIT		0x02

/* Performant mode flags */
#define SA5_PERF_INTR_PENDING   0x04
#define SA5_PERF_INTR_OFF       0x05
#define SA5_OUTDB_STATUS_PERF_BIT       0x01
#define SA5_OUTDB_CLEAR_PERF_BIT        0x01
#define SA5_OUTDB_CLEAR         0xA0
#define SA5_OUTDB_STATUS        0x9C


#define HPSA_INTR_ON 	1
#define HPSA_INTR_OFF	0

/*
 * Inbound Post Queue offsets for IO Accelerator Mode 2
 */
#define IOACCEL2_INBOUND_POSTQ_32	0x48
#define IOACCEL2_INBOUND_POSTQ_64_LOW	0xd0
#define IOACCEL2_INBOUND_POSTQ_64_HI	0xd4

#define HPSA_PHYSICAL_DEVICE_BUS	0
#define HPSA_RAID_VOLUME_BUS		1
#define HPSA_EXTERNAL_RAID_VOLUME_BUS	2
#define HPSA_HBA_BUS			0
#define HPSA_LEGACY_HBA_BUS		3

/*
 * Send the command to the hardware
 */
static void SA5_submit_command(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}

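/*
 * Same as SA5_submit_command() minus the flushing read back; wired up
 * via SA5_performant_access_no_read below.
 */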
static void SA5_submit_command_no_read(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}

static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}

/*
 *  This card is the opposite of the other cards.
 *   0 turns interrupts on...
 *   0x08 turns them off...
 */
static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

/*
 *  Variant of the above; 0x04 turns interrupts off...
 */
static void SA5B_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5B_INTR_OFF,
		       h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* turn on interrupts */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else {
		h->interrupts_enabled = 0;
		writel(SA5_PERF_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

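/*
 * Pull the next completion off reply queue q.  The low bit of each
 * 64-bit entry acts as a toggle: an entry is new only while that bit
 * equals rq->wraparound, which is flipped each time current_entry
 * wraps past h->max_commands.  Returns FIFO_EMPTY when nothing new is
 * present.
 */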
static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
{
	struct reply_queue_buffer *rq = &h->reply_queue[q];
	unsigned long register_value = FIFO_EMPTY;

	/* msi auto clears the interrupt pending bit. */
	if (unlikely(!(h->pdev->msi_enabled || h->msix_vectors))) {
		/* flush the controller write of the reply queue by reading
		 * outbound doorbell status register.
		 */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
		/* Do a read in order to flush the write to the controller
		 * (as per spec.)
		 */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
	}

	if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) {
		register_value = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		register_value = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return register_value;
}

/*
 *   returns value read from hardware.
 *     returns FIFO_EMPTY if there is nothing to read
 */
static unsigned long SA5_completed(struct ctlr_info *h,
	__attribute__((unused)) u8 q)
{
	unsigned long register_value
		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);

	if (register_value != FIFO_EMPTY)
		atomic_dec(&h->commands_outstanding);

#ifdef HPSA_DEBUG
	if (register_value != FIFO_EMPTY)
		dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
			register_value);
	else
		dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
#endif

	return register_value;
}

/*
 *	Returns true if an interrupt is pending.
 */
static bool SA5_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value  =
		readl(h->vaddr + SA5_INTR_STATUS);
	return register_value & SA5_INTR_PENDING;
}

static bool SA5_performant_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	if (!register_value)
		return false;

	/* Read outbound doorbell to flush */
	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
}

#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT    0x100

static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
		true : false;
}

/*
 *      Returns true if an interrupt is pending.
 */
static bool SA5B_intr_pending(struct ctlr_info *h)
{
	return readl(h->vaddr + SA5_INTR_STATUS) & SA5B_INTR_PENDING;
}

#define IOACCEL_MODE1_REPLY_QUEUE_INDEX  0x1A0
#define IOACCEL_MODE1_PRODUCER_INDEX     0x1B8
#define IOACCEL_MODE1_CONSUMER_INDEX     0x1BC
#define IOACCEL_MODE1_REPLY_UNUSED       0xFFFFFFFFFFFFFFFFULL

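/*
 * ioaccel mode 1 reply ring: unused slots hold IOACCEL_MODE1_REPLY_UNUSED,
 * so a slot holding anything else is a completion.  After consuming an
 * entry the slot is marked unused again and the new consumer index is
 * written back to the controller at IOACCEL_MODE1_CONSUMER_INDEX.
 */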
static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
{
	u64 register_value;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	BUG_ON(q >= h->nreply_queues);

	register_value = rq->head[rq->current_entry];
	if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
		rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
		if (++rq->current_entry == rq->size)
			rq->current_entry = 0;
		/*
		 * @todo
		 *
		 * Don't really need to write the new index after each command,
		 * but with current driver design this is easiest.
		 */
		wmb();
		writel((q << 24) | rq->current_entry, h->vaddr +
				IOACCEL_MODE1_CONSUMER_INDEX);
		atomic_dec(&h->commands_outstanding);
	}
	return (unsigned long) register_value;
}

static struct access_method SA5_access = {
	.submit_command =	SA5_submit_command,
	.set_intr_mask =	SA5_intr_mask,
	.intr_pending =		SA5_intr_pending,
	.command_completed =	SA5_completed,
};

/* Duplicate entry of the above to mark unsupported boards */
static struct access_method SA5A_access = {
	.submit_command =	SA5_submit_command,
	.set_intr_mask =	SA5_intr_mask,
	.intr_pending =		SA5_intr_pending,
	.command_completed =	SA5_completed,
};

static struct access_method SA5B_access = {
	.submit_command =	SA5_submit_command,
	.set_intr_mask =	SA5B_intr_mask,
	.intr_pending =		SA5B_intr_pending,
	.command_completed =	SA5_completed,
};

static struct access_method SA5_ioaccel_mode1_access = {
	.submit_command =	SA5_submit_command,
	.set_intr_mask =	SA5_performant_intr_mask,
	.intr_pending =		SA5_ioaccel_mode1_intr_pending,
	.command_completed =	SA5_ioaccel_mode1_completed,
};

static struct access_method SA5_ioaccel_mode2_access = {
	.submit_command =	SA5_submit_command_ioaccel2,
	.set_intr_mask =	SA5_performant_intr_mask,
	.intr_pending =		SA5_performant_intr_pending,
	.command_completed =	SA5_performant_completed,
};

static struct access_method SA5_performant_access = {
	.submit_command =	SA5_submit_command,
	.set_intr_mask =	SA5_performant_intr_mask,
	.intr_pending =		SA5_performant_intr_pending,
	.command_completed =	SA5_performant_completed,
};

static struct access_method SA5_performant_access_no_read = {
	.submit_command =	SA5_submit_command_no_read,
	.set_intr_mask =	SA5_performant_intr_mask,
	.intr_pending =		SA5_performant_intr_pending,
	.command_completed =	SA5_performant_completed,
};

struct board_type {
	u32	board_id;
	char	*product_name;
	struct access_method *access;
};
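
/*
 * One of these per supported controller: the PCI board id paired with
 * its product name and the access_method table it should use (one of
 * the SA5_*_access instances above).  hpsa.c is expected to look boards
 * up in a table of these at probe time.
 */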

#endif /* HPSA_H */