1edd16368SStephen M. Cameron /*
2edd16368SStephen M. Cameron * Disk Array driver for HP Smart Array SAS controllers
39e21760eSDon Brace * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
494c7bc31SDon Brace * Copyright 2016 Microsemi Corporation
51358f6dcSDon Brace * Copyright 2014-2015 PMC-Sierra, Inc.
61358f6dcSDon Brace * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
7edd16368SStephen M. Cameron *
8edd16368SStephen M. Cameron * This program is free software; you can redistribute it and/or modify
9edd16368SStephen M. Cameron * it under the terms of the GNU General Public License as published by
10edd16368SStephen M. Cameron * the Free Software Foundation; version 2 of the License.
11edd16368SStephen M. Cameron *
12edd16368SStephen M. Cameron * This program is distributed in the hope that it will be useful,
13edd16368SStephen M. Cameron * but WITHOUT ANY WARRANTY; without even the implied warranty of
14edd16368SStephen M. Cameron * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
15edd16368SStephen M. Cameron * NON INFRINGEMENT. See the GNU General Public License for more details.
16edd16368SStephen M. Cameron *
1794c7bc31SDon Brace * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
18edd16368SStephen M. Cameron *
19edd16368SStephen M. Cameron */
20edd16368SStephen M. Cameron #ifndef HPSA_H
21edd16368SStephen M. Cameron #define HPSA_H
22edd16368SStephen M. Cameron
23edd16368SStephen M. Cameron #include <scsi/scsicam.h>
24edd16368SStephen M. Cameron
25edd16368SStephen M. Cameron #define IO_OK 0
26edd16368SStephen M. Cameron #define IO_ERROR 1
27edd16368SStephen M. Cameron
28edd16368SStephen M. Cameron struct ctlr_info;
29edd16368SStephen M. Cameron
/*
 * Function-pointer table abstracting controller register access.
 * Implementations for the different transport modes (SA5_*, SA5B_*,
 * performant, ioaccel) appear later in this file.
 */
struct access_method {
	void (*submit_command)(struct ctlr_info *h,
		struct CommandList *c);
	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
	bool (*intr_pending)(struct ctlr_info *h);
	unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
};
37edd16368SStephen M. Cameron
/* for SAS hosts and SAS expanders */
struct hpsa_sas_node {
	struct device *parent_dev;		/* device this node hangs off of */
	struct list_head port_list_head;	/* list of hpsa_sas_port.port_list_entry */
};
43d04e62b9SKevin Barnett
/* one SAS port on an hpsa_sas_node; owns a list of hpsa_sas_phy */
struct hpsa_sas_port {
	struct list_head port_list_entry;	/* linkage in hpsa_sas_node.port_list_head */
	u64 sas_address;
	struct sas_port *port;			/* SCSI SAS transport port object */
	int next_phy_index;			/* index for the next phy added to this port */
	struct list_head phy_list_head;		/* list of hpsa_sas_phy.phy_list_entry */
	struct hpsa_sas_node *parent_node;	/* node this port belongs to */
	struct sas_rphy *rphy;			/* remote phy exposed via the SAS transport */
};
53d04e62b9SKevin Barnett
/* one phy belonging to an hpsa_sas_port */
struct hpsa_sas_phy {
	struct list_head phy_list_entry;	/* linkage in hpsa_sas_port.phy_list_head */
	struct sas_phy *phy;			/* SCSI SAS transport phy object */
	struct hpsa_sas_port *parent_port;	/* port this phy is attached to */
	bool added_to_port;			/* phy has been added to parent_port->port */
};
60d04e62b9SKevin Barnett
#define EXTERNAL_QD 128
/*
 * One entry per SCSI device exposed by the driver: a logical volume,
 * a physical drive, or the RAID controller itself (RAID_CTLR_LUNID).
 */
struct hpsa_scsi_dev_t {
	unsigned int devtype;
	int bus, target, lun;		/* as presented to the OS */
	unsigned char scsi3addr[8];	/* as presented to the HW */
	u8 physical_device : 1;
	u8 expose_device;
	u8 removed : 1;			/* device is marked for death */
	u8 was_removed : 1;		/* device actually removed */
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
	unsigned char device_id[16];	/* from inquiry pg. 0x83 */
	u64 sas_address;
	u64 eli;			/* from report diags. */
	unsigned char vendor[8];	/* bytes 8-15 of inquiry data */
	unsigned char model[16];	/* bytes 16-31 of inquiry data */
	unsigned char rev;		/* byte 2 of inquiry data */
	unsigned char raid_level;	/* from inquiry page 0xC1 */
	unsigned char volume_offline;	/* discovered via TUR or VPD */
	u16 queue_depth;		/* max queue_depth for this device */
	atomic_t commands_outstanding;	/* track commands sent to device */
	atomic_t ioaccel_cmds_out;	/* Only used for physical devices
					 * counts commands sent to physical
					 * device via "ioaccel" path.
					 */
	bool in_reset;
	u32 ioaccel_handle;
	u8 active_path_index;
	u8 path_map;
	u8 bay;
	u8 box[8];
	u16 phys_connector[8];
	int offload_config;		/* I/O accel RAID offload configured */
	int offload_enabled;		/* I/O accel RAID offload enabled */
	int offload_to_be_enabled;
	int hba_ioaccel_enabled;
	int offload_to_mirror;		/* Send next I/O accelerator RAID
					 * offload request to mirror drive
					 */
	struct raid_map_data raid_map;	/* I/O accelerator RAID map */

	/*
	 * Pointers from logical drive map indices to the phys drives that
	 * make those logical drives.  Note, multiple logical drives may
	 * share physical drives.  You can have for instance 5 physical
	 * drives with 3 logical drives each using those same 5 physical
	 * disks. We need these pointers for counting i/o's out to physical
	 * devices in order to honor physical device queue depth limits.
	 */
	struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES];
	int nphysical_disks;
	int supports_aborts;
	struct hpsa_sas_port *sas_port;
	int external;	/* 1-from external array 0-not <0-unknown */
};
115edd16368SStephen M. Cameron
/*
 * One performant-mode reply (completion) queue: an array of 64-bit
 * entries filled in by the controller and consumed by the driver
 * (see SA5_performant_completed()).
 */
struct reply_queue_buffer {
	u64 *head;		/* virtual address of the entry array */
	size_t size;		/* size of the buffer */
	u8 wraparound;		/* toggled each pass; compared to entry's low parity bit */
	u32 current_entry;	/* index of the next entry to consume */
	dma_addr_t busaddr;	/* DMA address of the buffer */
};
123254f796bSMatt Gates
/*
 * BMIC controller parameters page.  #pragma pack(1) ensures the compiler
 * inserts no padding, so the struct layout matches the on-the-wire
 * format byte for byte.
 */
#pragma pack(1)
struct bmic_controller_parameters {
	u8 led_flags;
	u8 enable_command_list_verification;
	u8 backed_out_write_drives;
	u16 stripes_for_parity;
	u8 parity_distribution_mode_flags;
	u16 max_driver_requests;
	u16 elevator_trend_count;
	u8 disable_elevator;
	u8 force_scan_complete;
	u8 scsi_transfer_mode;
	u8 force_narrow;
	u8 rebuild_priority;
	u8 expand_priority;
	u8 host_sdb_asic_fix;
	u8 pdpi_burst_from_host_disabled;
	char software_name[64];
	char hardware_name[32];
	u8 bridge_revision;
	u8 snapshot_priority;
	u32 os_specific;
	u8 post_prompt_timeout;
	u8 automatic_drive_slamming;
	u8 reserved1;
	u8 nvram_flags;
	u8 cache_nvram_flags;
	u8 drive_config_flags;
	u16 reserved2;
	u8 temp_warning_level;
	u8 temp_shutdown_level;
	u8 temp_condition_reset;
	u8 max_coalesce_commands;
	u32 max_coalesce_delay;
	u8 orca_password[4];
	u8 access_id[16];
	u8 reserved[356];
};
#pragma pack()
163316b221aSStephen M. Cameron
/*
 * Per-controller driver state; one instance per Smart Array PCI function.
 */
struct ctlr_info {
	unsigned int *reply_map;	/* NOTE(review): presumably CPU -> reply queue map; confirm in hpsa.c */
	int ctlr;
	char devname[8];
	char *product_name;
	struct pci_dev *pdev;
	u32 board_id;
	u64 sas_address;
	void __iomem *vaddr;		/* mapped controller registers (SA5_* offsets) */
	unsigned long paddr;
	int nr_cmds; /* Number of commands allowed on this controller */
#define HPSA_CMDS_RESERVED_FOR_ABORTS 2
#define HPSA_CMDS_RESERVED_FOR_DRIVER 1
	struct CfgTable __iomem *cfgtable;
	int interrupts_enabled;		/* mirrors last SA5*_intr_mask() state */
	int max_commands;
	int last_collision_tag; /* tags are global */
	atomic_t commands_outstanding;	/* decremented by the *_completed() handlers */
# define PERF_MODE_INT	0
# define DOORBELL_INT	1
# define SIMPLE_MODE_INT	2
# define MEMQ_MODE_INT	3
	unsigned int msix_vectors;
	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
	struct access_method access;	/* hardware access vtable for this mode */

	/* queue and queue Info */
	unsigned int Qdepth;
	unsigned int maxSG;
	spinlock_t lock;
	int maxsgentries;
	u8 max_cmd_sg_entries;
	int chainsize;
	struct SGDescriptor **cmd_sg_list;
	struct ioaccel2_sg_element **ioaccel2_cmd_sg_list;

	/* pointers to command and error info pool */
	struct CommandList *cmd_pool;
	dma_addr_t cmd_pool_dhandle;
	struct io_accel1_cmd *ioaccel_cmd_pool;
	dma_addr_t ioaccel_cmd_pool_dhandle;
	struct io_accel2_cmd *ioaccel2_cmd_pool;
	dma_addr_t ioaccel2_cmd_pool_dhandle;
	struct ErrorInfo *errinfo_pool;
	dma_addr_t errinfo_pool_dhandle;
	unsigned long *cmd_pool_bits;	/* bitmap of allocated cmd_pool slots */
	int scan_finished;
	u8 scan_waiting : 1;
	spinlock_t scan_lock;
	wait_queue_head_t scan_wait_queue;

	struct Scsi_Host *scsi_host;
	spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
	int ndevices; /* number of used elements in .dev[] array. */
	struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
	/*
	 * Performant mode tables.
	 */
	u32 trans_support;
	u32 trans_offset;
	struct TransTable_struct __iomem *transtable;
	unsigned long transMethod;

	/* cap concurrent passthrus at some reasonable maximum */
#define HPSA_MAX_CONCURRENT_PASSTHRUS (10)
	atomic_t passthru_cmds_avail;

	/*
	 * Performant mode completion buffers
	 */
	size_t reply_queue_size;
	struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
	u8 nreply_queues;
	u32 *blockFetchTable;
	u32 *ioaccel1_blockFetchTable;
	u32 *ioaccel2_blockFetchTable;
	u32 __iomem *ioaccel2_bft2_regs;
	unsigned char *hba_inquiry_data;
	u32 driver_support;
	u32 fw_support;
	int ioaccel_support;
	int ioaccel_maxsg;
	u64 last_intr_timestamp;	/* lockup-detector bookkeeping */
	u32 last_heartbeat;
	u64 last_heartbeat_timestamp;
	u32 heartbeat_sample_interval;
	atomic_t firmware_flash_in_progress;
	u32 __percpu *lockup_detected;
	struct delayed_work monitor_ctlr_work;
	struct delayed_work rescan_ctlr_work;
	struct delayed_work event_monitor_work;
	int remove_in_progress;
	/* Address of h->q[x] is passed to intr handler to know which queue */
	u8 q[MAX_REPLY_QUEUES];
	char intrname[MAX_REPLY_QUEUES][16];	/* "hpsa0-msix00" names */
	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
#define HPSATMF_BITS_SUPPORTED  (1 << 0)
#define HPSATMF_PHYS_LUN_RESET  (1 << 1)
#define HPSATMF_PHYS_NEX_RESET  (1 << 2)
#define HPSATMF_PHYS_TASK_ABORT (1 << 3)
#define HPSATMF_PHYS_TSET_ABORT (1 << 4)
#define HPSATMF_PHYS_CLEAR_ACA  (1 << 5)
#define HPSATMF_PHYS_CLEAR_TSET (1 << 6)
#define HPSATMF_PHYS_QRY_TASK   (1 << 7)
#define HPSATMF_PHYS_QRY_TSET   (1 << 8)
#define HPSATMF_PHYS_QRY_ASYNC  (1 << 9)
#define HPSATMF_IOACCEL_ENABLED (1 << 15)
#define HPSATMF_MASK_SUPPORTED  (1 << 16)
#define HPSATMF_LOG_LUN_RESET   (1 << 17)
#define HPSATMF_LOG_NEX_RESET   (1 << 18)
#define HPSATMF_LOG_TASK_ABORT  (1 << 19)
#define HPSATMF_LOG_TSET_ABORT  (1 << 20)
#define HPSATMF_LOG_CLEAR_ACA   (1 << 21)
#define HPSATMF_LOG_CLEAR_TSET  (1 << 22)
#define HPSATMF_LOG_QRY_TASK    (1 << 23)
#define HPSATMF_LOG_QRY_TSET    (1 << 24)
#define HPSATMF_LOG_QRY_ASYNC   (1 << 25)
	u32 events;
#define CTLR_STATE_CHANGE_EVENT				(1 << 0)
#define CTLR_ENCLOSURE_HOT_PLUG_EVENT			(1 << 1)
#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV		(1 << 4)
#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV		(1 << 5)
#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL		(1 << 6)
#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED	(1 << 30)
#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE	(1 << 31)

/* any of these events means the device list may be stale */
#define RESCAN_REQUIRED_EVENT_BITS \
		(CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
		CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
		CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
	spinlock_t offline_device_lock;		/* protects offline_device_list */
	struct list_head offline_device_list;	/* of struct offline_device_entry */
	int acciopath_status;
	int drv_req_rescan;
	int raid_offload_debug;
	int discovery_polling;
	int legacy_board;
	struct ReportLUNdata *lastlogicals;
	int needs_abort_tags_swizzled;
	struct workqueue_struct *resubmit_wq;
	struct workqueue_struct *rescan_ctlr_wq;
	struct workqueue_struct *monitor_ctlr_wq;
	atomic_t abort_cmds_available;
	wait_queue_head_t event_sync_wait_queue;
	struct mutex reset_mutex;
	u8 reset_in_progress;
	struct hpsa_sas_node *sas_host;
	spinlock_t reset_lock;
};
3159846590eSStephen M. Cameron
/*
 * Entry on ctlr_info.offline_device_list tracking a volume reported
 * offline (see hpsa_scsi_dev_t.volume_offline).
 */
struct offline_device_entry {
	unsigned char scsi3addr[8];	/* address of the offline device */
	struct list_head offline_list;	/* linkage in ctlr_info.offline_device_list */
};
3209846590eSStephen M. Cameron
#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
/* reset scope codes used with HPSA_DEVICE_RESET_MSG */
#define HPSA_RESET_TYPE_CONTROLLER 0x00
#define HPSA_RESET_TYPE_BUS 0x01
#define HPSA_RESET_TYPE_LUN 0x04
#define HPSA_PHYS_TARGET_RESET 0x99 /* not defined by cciss spec */
#define HPSA_MSG_SEND_RETRY_LIMIT 10
#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)

/* Maximum time in seconds driver will wait for command completions
 * when polling before giving up.
 */
#define HPSA_MAX_POLL_TIME_SECS (20)

/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
 * how many times to retry TEST UNIT READY on a device
 * while waiting for it to become ready before giving up.
 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
 * between sending TURs while waiting for a device
 * to become ready.
 */
#define HPSA_TUR_RETRY_LIMIT (20)
#define HPSA_MAX_WAIT_INTERVAL_SECS (30)

/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
 * to become ready, in seconds, before giving up on it.
 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
 * between polling the board to see if it is ready, in
 * milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and
 * HPSA_BOARD_READY_ITERATIONS are derived from those.
 */
#define HPSA_BOARD_READY_WAIT_SECS (120)
#define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_BOARD_NOT_READY_ITERATIONS \
	((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)
365edd16368SStephen M. Cameron
/* Defining the different access methods */
/*
 * Memory mapped FIFO interface (SMART 53xx cards).
 * All values are byte offsets into the BAR mapped at ctlr_info.vaddr,
 * except the INTR/FIFO bit masks noted below.
 */
#define SA5_DOORBELL 0x20
#define SA5_REQUEST_PORT_OFFSET 0x40
#define SA5_REQUEST_PORT64_LO_OFFSET 0xC0
#define SA5_REQUEST_PORT64_HI_OFFSET 0xC4
#define SA5_REPLY_INTR_MASK_OFFSET 0x34
#define SA5_REPLY_PORT_OFFSET 0x44
#define SA5_INTR_STATUS 0x30
#define SA5_SCRATCHPAD_OFFSET 0xB0

#define SA5_CTCFG_OFFSET 0xB4
#define SA5_CTMEM_OFFSET 0xB8

/* bit masks, not register offsets */
#define SA5_INTR_OFF 0x08
#define SA5B_INTR_OFF 0x04
#define SA5_INTR_PENDING 0x08
#define SA5B_INTR_PENDING 0x04
#define FIFO_EMPTY 0xffffffff
#define HPSA_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */

#define HPSA_ERROR_BIT 0x02
/* Performant mode flags */
#define SA5_PERF_INTR_PENDING 0x04
#define SA5_PERF_INTR_OFF 0x05
#define SA5_OUTDB_STATUS_PERF_BIT 0x01
#define SA5_OUTDB_CLEAR_PERF_BIT 0x01	/* duplicate definition removed */
#define SA5_OUTDB_CLEAR 0xA0
#define SA5_OUTDB_STATUS 0x9C


#define HPSA_INTR_ON 1
#define HPSA_INTR_OFF 0

/*
 * Inbound Post Queue offsets for IO Accelerator Mode 2
 */
#define IOACCEL2_INBOUND_POSTQ_32 0x48
#define IOACCEL2_INBOUND_POSTQ_64_LOW 0xd0
#define IOACCEL2_INBOUND_POSTQ_64_HI 0xd4

/* logical bus numbers used when presenting devices to the OS */
#define HPSA_PHYSICAL_DEVICE_BUS 0
#define HPSA_RAID_VOLUME_BUS 1
#define HPSA_EXTERNAL_RAID_VOLUME_BUS 2
#define HPSA_HBA_BUS 0
#define HPSA_LEGACY_HBA_BUS 3
416c795505aSKevin Barnett
/*
 * Send the command to the hardware
 */
static void SA5_submit_command(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
	/* dummy read-back flushes the posted write to the controller */
	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}
426edd16368SStephen M. Cameron
SA5_submit_command_no_read(struct ctlr_info * h,struct CommandList * c)427b3a52e79SStephen M. Cameron static void SA5_submit_command_no_read(struct ctlr_info *h,
428b3a52e79SStephen M. Cameron struct CommandList *c)
429b3a52e79SStephen M. Cameron {
430b3a52e79SStephen M. Cameron writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
431b3a52e79SStephen M. Cameron }
432b3a52e79SStephen M. Cameron
SA5_submit_command_ioaccel2(struct ctlr_info * h,struct CommandList * c)433c349775eSScott Teel static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
434c349775eSScott Teel struct CommandList *c)
435c349775eSScott Teel {
436c349775eSScott Teel writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
437c349775eSScott Teel }
438c349775eSScott Teel
/*
 * This card is the opposite of the other cards.
 * 0 turns interrupts on...
 * 0x08 turns them off...
 * @val: nonzero enables interrupts, zero disables them.
 */
static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		/* read back to flush the posted write */
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}
457303932fdSDon Brace
/*
 * Variant of the above; 0x04 turns interrupts off...
 * (SA5B boards use a different mask bit than SA5.)
 */
static void SA5B_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		/* read back to flush the posted write */
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5B_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}
474135ae6edSHannes Reinecke
SA5_performant_intr_mask(struct ctlr_info * h,unsigned long val)475303932fdSDon Brace static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
476303932fdSDon Brace {
477303932fdSDon Brace if (val) { /* turn on interrupts */
478303932fdSDon Brace h->interrupts_enabled = 1;
479303932fdSDon Brace writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
4808cd21da7SStephen M. Cameron (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
481303932fdSDon Brace } else {
482303932fdSDon Brace h->interrupts_enabled = 0;
483303932fdSDon Brace writel(SA5_PERF_INTR_OFF,
484303932fdSDon Brace h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
4858cd21da7SStephen M. Cameron (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
486303932fdSDon Brace }
487303932fdSDon Brace }
488303932fdSDon Brace
SA5_performant_completed(struct ctlr_info * h,u8 q)489254f796bSMatt Gates static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
490303932fdSDon Brace {
491072b0518SStephen M. Cameron struct reply_queue_buffer *rq = &h->reply_queue[q];
4920cbf768eSStephen M. Cameron unsigned long register_value = FIFO_EMPTY;
493303932fdSDon Brace
4942c17d2daSStephen M. Cameron /* msi auto clears the interrupt pending bit. */
495bc2bb154SChristoph Hellwig if (unlikely(!(h->pdev->msi_enabled || h->msix_vectors))) {
496303932fdSDon Brace /* flush the controller write of the reply queue by reading
497303932fdSDon Brace * outbound doorbell status register.
498303932fdSDon Brace */
499bee266a6SDon Brace (void) readl(h->vaddr + SA5_OUTDB_STATUS);
500303932fdSDon Brace writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
501303932fdSDon Brace /* Do a read in order to flush the write to the controller
502303932fdSDon Brace * (as per spec.)
503303932fdSDon Brace */
504bee266a6SDon Brace (void) readl(h->vaddr + SA5_OUTDB_STATUS);
505303932fdSDon Brace }
506303932fdSDon Brace
507bee266a6SDon Brace if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) {
508254f796bSMatt Gates register_value = rq->head[rq->current_entry];
509254f796bSMatt Gates rq->current_entry++;
5100cbf768eSStephen M. Cameron atomic_dec(&h->commands_outstanding);
511303932fdSDon Brace } else {
512303932fdSDon Brace register_value = FIFO_EMPTY;
513303932fdSDon Brace }
514303932fdSDon Brace /* Check for wraparound */
515254f796bSMatt Gates if (rq->current_entry == h->max_commands) {
516254f796bSMatt Gates rq->current_entry = 0;
517254f796bSMatt Gates rq->wraparound ^= 1;
518303932fdSDon Brace }
519303932fdSDon Brace return register_value;
520303932fdSDon Brace }
521303932fdSDon Brace
/*
 * Simple-mode completion: pop one completed command from the reply FIFO.
 * returns value read from hardware.
 * returns FIFO_EMPTY if there is nothing to read
 * (@q is unused; simple mode has a single reply FIFO.)
 */
static unsigned long SA5_completed(struct ctlr_info *h,
	__attribute__((unused)) u8 q)
{
	unsigned long register_value
		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);

	if (register_value != FIFO_EMPTY)
		atomic_dec(&h->commands_outstanding);

#ifdef HPSA_DEBUG
	if (register_value != FIFO_EMPTY)
		dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
			register_value);
	else
		dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
#endif

	return register_value;
}
545edd16368SStephen M. Cameron /*
546edd16368SStephen M. Cameron * Returns true if an interrupt is pending..
547edd16368SStephen M. Cameron */
SA5_intr_pending(struct ctlr_info * h)548900c5440SStephen M. Cameron static bool SA5_intr_pending(struct ctlr_info *h)
549edd16368SStephen M. Cameron {
550edd16368SStephen M. Cameron unsigned long register_value =
551edd16368SStephen M. Cameron readl(h->vaddr + SA5_INTR_STATUS);
552900c5440SStephen M. Cameron return register_value & SA5_INTR_PENDING;
553edd16368SStephen M. Cameron }
554edd16368SStephen M. Cameron
SA5_performant_intr_pending(struct ctlr_info * h)555303932fdSDon Brace static bool SA5_performant_intr_pending(struct ctlr_info *h)
556303932fdSDon Brace {
557303932fdSDon Brace unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
558303932fdSDon Brace
559303932fdSDon Brace if (!register_value)
560303932fdSDon Brace return false;
561303932fdSDon Brace
562303932fdSDon Brace /* Read outbound doorbell to flush */
563303932fdSDon Brace register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
564303932fdSDon Brace return register_value & SA5_OUTDB_STATUS_PERF_BIT;
565303932fdSDon Brace }
566edd16368SStephen M. Cameron
567e1f7de0cSMatt Gates #define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT 0x100
568e1f7de0cSMatt Gates
SA5_ioaccel_mode1_intr_pending(struct ctlr_info * h)569e1f7de0cSMatt Gates static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
570e1f7de0cSMatt Gates {
571e1f7de0cSMatt Gates unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
572e1f7de0cSMatt Gates
573e1f7de0cSMatt Gates return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
574e1f7de0cSMatt Gates true : false;
575e1f7de0cSMatt Gates }
576e1f7de0cSMatt Gates
577135ae6edSHannes Reinecke /*
578135ae6edSHannes Reinecke * Returns true if an interrupt is pending..
579135ae6edSHannes Reinecke */
SA5B_intr_pending(struct ctlr_info * h)580135ae6edSHannes Reinecke static bool SA5B_intr_pending(struct ctlr_info *h)
581135ae6edSHannes Reinecke {
582135ae6edSHannes Reinecke return readl(h->vaddr + SA5_INTR_STATUS) & SA5B_INTR_PENDING;
583135ae6edSHannes Reinecke }
584135ae6edSHannes Reinecke
585e1f7de0cSMatt Gates #define IOACCEL_MODE1_REPLY_QUEUE_INDEX 0x1A0
586e1f7de0cSMatt Gates #define IOACCEL_MODE1_PRODUCER_INDEX 0x1B8
587e1f7de0cSMatt Gates #define IOACCEL_MODE1_CONSUMER_INDEX 0x1BC
588e1f7de0cSMatt Gates #define IOACCEL_MODE1_REPLY_UNUSED 0xFFFFFFFFFFFFFFFFULL
589e1f7de0cSMatt Gates
SA5_ioaccel_mode1_completed(struct ctlr_info * h,u8 q)590283b4a9bSStephen M. Cameron static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
591e1f7de0cSMatt Gates {
592e1f7de0cSMatt Gates u64 register_value;
593072b0518SStephen M. Cameron struct reply_queue_buffer *rq = &h->reply_queue[q];
594e1f7de0cSMatt Gates
595e1f7de0cSMatt Gates BUG_ON(q >= h->nreply_queues);
596e1f7de0cSMatt Gates
597e1f7de0cSMatt Gates register_value = rq->head[rq->current_entry];
598e1f7de0cSMatt Gates if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
599e1f7de0cSMatt Gates rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
600e1f7de0cSMatt Gates if (++rq->current_entry == rq->size)
601e1f7de0cSMatt Gates rq->current_entry = 0;
602283b4a9bSStephen M. Cameron /*
603283b4a9bSStephen M. Cameron * @todo
604283b4a9bSStephen M. Cameron *
605283b4a9bSStephen M. Cameron * Don't really need to write the new index after each command,
606283b4a9bSStephen M. Cameron * but with current driver design this is easiest.
607283b4a9bSStephen M. Cameron */
608283b4a9bSStephen M. Cameron wmb();
609283b4a9bSStephen M. Cameron writel((q << 24) | rq->current_entry, h->vaddr +
610283b4a9bSStephen M. Cameron IOACCEL_MODE1_CONSUMER_INDEX);
6110cbf768eSStephen M. Cameron atomic_dec(&h->commands_outstanding);
612e1f7de0cSMatt Gates }
613e1f7de0cSMatt Gates return (unsigned long) register_value;
614e1f7de0cSMatt Gates }
615e1f7de0cSMatt Gates
616edd16368SStephen M. Cameron static struct access_method SA5_access = {
61793380123SKees Cook .submit_command = SA5_submit_command,
61893380123SKees Cook .set_intr_mask = SA5_intr_mask,
61993380123SKees Cook .intr_pending = SA5_intr_pending,
62093380123SKees Cook .command_completed = SA5_completed,
621edd16368SStephen M. Cameron };
622edd16368SStephen M. Cameron
623135ae6edSHannes Reinecke /* Duplicate entry of the above to mark unsupported boards */
624135ae6edSHannes Reinecke static struct access_method SA5A_access = {
625135ae6edSHannes Reinecke .submit_command = SA5_submit_command,
626135ae6edSHannes Reinecke .set_intr_mask = SA5_intr_mask,
627135ae6edSHannes Reinecke .intr_pending = SA5_intr_pending,
628135ae6edSHannes Reinecke .command_completed = SA5_completed,
629135ae6edSHannes Reinecke };
630135ae6edSHannes Reinecke
631135ae6edSHannes Reinecke static struct access_method SA5B_access = {
632135ae6edSHannes Reinecke .submit_command = SA5_submit_command,
633135ae6edSHannes Reinecke .set_intr_mask = SA5B_intr_mask,
634135ae6edSHannes Reinecke .intr_pending = SA5B_intr_pending,
635135ae6edSHannes Reinecke .command_completed = SA5_completed,
636135ae6edSHannes Reinecke };
637135ae6edSHannes Reinecke
638e1f7de0cSMatt Gates static struct access_method SA5_ioaccel_mode1_access = {
63993380123SKees Cook .submit_command = SA5_submit_command,
64093380123SKees Cook .set_intr_mask = SA5_performant_intr_mask,
64193380123SKees Cook .intr_pending = SA5_ioaccel_mode1_intr_pending,
64293380123SKees Cook .command_completed = SA5_ioaccel_mode1_completed,
643e1f7de0cSMatt Gates };
644e1f7de0cSMatt Gates
645c349775eSScott Teel static struct access_method SA5_ioaccel_mode2_access = {
64693380123SKees Cook .submit_command = SA5_submit_command_ioaccel2,
64793380123SKees Cook .set_intr_mask = SA5_performant_intr_mask,
64893380123SKees Cook .intr_pending = SA5_performant_intr_pending,
64993380123SKees Cook .command_completed = SA5_performant_completed,
650c349775eSScott Teel };
651c349775eSScott Teel
652303932fdSDon Brace static struct access_method SA5_performant_access = {
65393380123SKees Cook .submit_command = SA5_submit_command,
65493380123SKees Cook .set_intr_mask = SA5_performant_intr_mask,
65593380123SKees Cook .intr_pending = SA5_performant_intr_pending,
65693380123SKees Cook .command_completed = SA5_performant_completed,
657303932fdSDon Brace };
658303932fdSDon Brace
659b3a52e79SStephen M. Cameron static struct access_method SA5_performant_access_no_read = {
66093380123SKees Cook .submit_command = SA5_submit_command_no_read,
66193380123SKees Cook .set_intr_mask = SA5_performant_intr_mask,
66293380123SKees Cook .intr_pending = SA5_performant_intr_pending,
66393380123SKees Cook .command_completed = SA5_performant_completed,
664b3a52e79SStephen M. Cameron };
665b3a52e79SStephen M. Cameron
666edd16368SStephen M. Cameron struct board_type {
66701a02ffcSStephen M. Cameron u32 board_id;
668edd16368SStephen M. Cameron char *product_name;
669edd16368SStephen M. Cameron struct access_method *access;
670edd16368SStephen M. Cameron };
671edd16368SStephen M. Cameron
672edd16368SStephen M. Cameron #endif /* HPSA_H */
673edd16368SStephen M. Cameron
674