/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */
#ifndef HPSA_H
#define HPSA_H

#include <scsi/scsicam.h>

#define IO_OK		0
#define IO_ERROR	1

struct ctlr_info;

/*
 * Per-board-mode vtable of register-access operations.  One instance is
 * selected per controller (see the SA5_*_access tables at the bottom of
 * this file) depending on whether the board runs in simple, performant,
 * or ioaccel mode.
 */
struct access_method {
	void (*submit_command)(struct ctlr_info *h,
		struct CommandList *c);
	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
	unsigned long (*fifo_full)(struct ctlr_info *h);
	bool (*intr_pending)(struct ctlr_info *h);
	unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
};

/* Driver's view of one SCSI device (logical volume or physical drive). */
struct hpsa_scsi_dev_t {
	int devtype;
	int bus, target, lun;		/* as presented to the OS */
	unsigned char scsi3addr[8];	/* as presented to the HW */
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
	unsigned char device_id[16];	/* from inquiry pg. 0x83 */
	unsigned char vendor[8];	/* bytes 8-15 of inquiry data */
	unsigned char model[16];	/* bytes 16-31 of inquiry data */
	unsigned char raid_level;	/* from inquiry page 0xC1 */
	unsigned char volume_offline;	/* discovered via TUR or VPD */
	u32 ioaccel_handle;
	int offload_config;		/* I/O accel RAID offload configured */
	int offload_enabled;		/* I/O accel RAID offload enabled */
	int offload_to_mirror;		/* Send next I/O accelerator RAID
					 * offload request to mirror drive
					 */
	struct raid_map_data raid_map;	/* I/O accelerator RAID map */

};
/* One hardware reply (completion) queue; head points at the ring buffer. */
struct reply_pool {
	u64 *head;
	size_t size;
	u8 wraparound;	/* toggled each time current_entry wraps to 0 */
	u32 current_entry;
};

/*
 * BMIC "controller parameters" page.  This mirrors a firmware-defined
 * structure, so it is byte-packed and field order/sizes must not change.
 */
#pragma pack(1)
struct bmic_controller_parameters {
	u8 led_flags;
	u8 enable_command_list_verification;
	u8 backed_out_write_drives;
	u16 stripes_for_parity;
	u8 parity_distribution_mode_flags;
	u16 max_driver_requests;
	u16 elevator_trend_count;
	u8 disable_elevator;
	u8 force_scan_complete;
	u8 scsi_transfer_mode;
	u8 force_narrow;
	u8 rebuild_priority;
	u8 expand_priority;
	u8 host_sdb_asic_fix;
	u8 pdpi_burst_from_host_disabled;
	char software_name[64];
	char hardware_name[32];
	u8 bridge_revision;
	u8 snapshot_priority;
	u32 os_specific;
	u8 post_prompt_timeout;
	u8 automatic_drive_slamming;
	u8 reserved1;
	u8 nvram_flags;
#define HBA_MODE_ENABLED_FLAG (1 << 3)
	u8 cache_nvram_flags;
	u8 drive_config_flags;
	u16 reserved2;
	u8 temp_warning_level;
	u8 temp_shutdown_level;
	u8 temp_condition_reset;
	u8 max_coalesce_commands;
	u32 max_coalesce_delay;
	u8 orca_password[4];
	u8 access_id[16];
	u8 reserved[356];
};
#pragma pack()

/* Per-controller state: PCI/MMIO handles, command pools, reply queues,
 * device list, and lockup/heartbeat monitoring state.
 */
struct ctlr_info {
	int	ctlr;
	char	devname[8];
	char    *product_name;
	struct pci_dev *pdev;
	u32	board_id;
	void __iomem *vaddr;
	unsigned long paddr;
	int	nr_cmds; /* Number of commands allowed on this controller */
	struct CfgTable __iomem *cfgtable;
	int	interrupts_enabled;
	int	major;
	int	max_commands;
	int	commands_outstanding;
	int	max_outstanding; /* Debug */
	int	usage_count;  /* number of opens of all minor devices */
#	define PERF_MODE_INT	0
#	define DOORBELL_INT	1
#	define SIMPLE_MODE_INT	2
#	define MEMQ_MODE_INT	3
	unsigned int intr[MAX_REPLY_QUEUES];
	unsigned int msix_vector;
	unsigned int msi_vector;
	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
	struct access_method access;
	char hba_mode_enabled;

	/* queue and queue Info */
	struct list_head reqQ;
	struct list_head cmpQ;
	unsigned int Qdepth;
	unsigned int maxSG;
	spinlock_t lock;
	int maxsgentries;
	u8 max_cmd_sg_entries;
	int chainsize;
	struct SGDescriptor **cmd_sg_list;

	/* pointers to command and error info pool */
	struct CommandList	*cmd_pool;
	dma_addr_t		cmd_pool_dhandle;
	struct io_accel1_cmd	*ioaccel_cmd_pool;
	dma_addr_t		ioaccel_cmd_pool_dhandle;
	struct io_accel2_cmd	*ioaccel2_cmd_pool;
	dma_addr_t		ioaccel2_cmd_pool_dhandle;
	struct ErrorInfo	*errinfo_pool;
	dma_addr_t		errinfo_pool_dhandle;
	unsigned long		*cmd_pool_bits;
	int			scan_finished;
	spinlock_t		scan_lock;
	wait_queue_head_t	scan_wait_queue;

	struct Scsi_Host *scsi_host;
	spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
	int ndevices; /* number of used elements in .dev[] array. */
	struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
	/*
	 * Performant mode tables.
	 */
	u32 trans_support;
	u32 trans_offset;
	struct TransTable_struct *transtable;
	unsigned long transMethod;

	/* cap concurrent passthrus at some reasonable maximum */
#define HPSA_MAX_CONCURRENT_PASSTHRUS (20)
	spinlock_t passthru_count_lock; /* protects passthru_count */
	int passthru_count;

	/*
	 * Performant mode completion buffers
	 */
	u64 *reply_pool;
	size_t reply_pool_size;
	struct reply_pool reply_queue[MAX_REPLY_QUEUES];
	u8 nreply_queues;
	dma_addr_t reply_pool_dhandle;
	u32 *blockFetchTable;
	u32 *ioaccel1_blockFetchTable;
	u32 *ioaccel2_blockFetchTable;
	u32 *ioaccel2_bft2_regs;
	unsigned char *hba_inquiry_data;
	u32 driver_support;
	u32 fw_support;
	int ioaccel_support;
	int ioaccel_maxsg;
	u64 last_intr_timestamp;
	u32 last_heartbeat;
	u64 last_heartbeat_timestamp;
	u32 heartbeat_sample_interval;
	atomic_t firmware_flash_in_progress;
	u32 lockup_detected;
	struct delayed_work monitor_ctlr_work;
	int remove_in_progress;
	u32 fifo_recently_full;
	/* Address of h->q[x] is passed to intr handler to know which queue */
	u8 q[MAX_REPLY_QUEUES];
	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
#define HPSATMF_BITS_SUPPORTED  (1 << 0)
#define HPSATMF_PHYS_LUN_RESET  (1 << 1)
#define HPSATMF_PHYS_NEX_RESET  (1 << 2)
#define HPSATMF_PHYS_TASK_ABORT (1 << 3)
#define HPSATMF_PHYS_TSET_ABORT (1 << 4)
#define HPSATMF_PHYS_CLEAR_ACA  (1 << 5)
#define HPSATMF_PHYS_CLEAR_TSET (1 << 6)
#define HPSATMF_PHYS_QRY_TASK   (1 << 7)
#define HPSATMF_PHYS_QRY_TSET   (1 << 8)
#define HPSATMF_PHYS_QRY_ASYNC  (1 << 9)
#define HPSATMF_MASK_SUPPORTED  (1 << 16)
#define HPSATMF_LOG_LUN_RESET   (1 << 17)
#define HPSATMF_LOG_NEX_RESET   (1 << 18)
#define HPSATMF_LOG_TASK_ABORT  (1 << 19)
#define HPSATMF_LOG_TSET_ABORT  (1 << 20)
#define HPSATMF_LOG_CLEAR_ACA   (1 << 21)
#define HPSATMF_LOG_CLEAR_TSET  (1 << 22)
#define HPSATMF_LOG_QRY_TASK    (1 << 23)
#define HPSATMF_LOG_QRY_TSET    (1 << 24)
#define HPSATMF_LOG_QRY_ASYNC   (1 << 25)
	u32 events;
#define CTLR_STATE_CHANGE_EVENT				(1 << 0)
#define CTLR_ENCLOSURE_HOT_PLUG_EVENT			(1 << 1)
#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV		(1 << 4)
#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV		(1 << 5)
#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL		(1 << 6)
#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED	(1 << 30)
#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE	(1 << 31)

#define RESCAN_REQUIRED_EVENT_BITS \
		(CTLR_STATE_CHANGE_EVENT | \
		CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
		CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL | \
		CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
		CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
	spinlock_t offline_device_lock;
	struct list_head offline_device_list;
	int	acciopath_status;
	int	drv_req_rescan;	/* flag for driver to request rescan event */
	int	raid_offload_debug;
};

/* Entry on h->offline_device_list; tracks volumes seen offline via TUR/VPD. */
struct offline_device_entry {
	unsigned char scsi3addr[8];
	struct list_head offline_list;
};

#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
#define HPSA_RESET_TYPE_CONTROLLER 0x00
#define HPSA_RESET_TYPE_BUS 0x01
#define HPSA_RESET_TYPE_TARGET 0x03
#define HPSA_RESET_TYPE_LUN 0x04
#define HPSA_MSG_SEND_RETRY_LIMIT 10
#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)

/* Maximum time in seconds driver will wait for command completions
 * when polling before giving up.
 */
#define HPSA_MAX_POLL_TIME_SECS (20)

/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
 * how many times to retry TEST UNIT READY on a device
 * while waiting for it to become ready before giving up.
 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
 * between sending TURs while waiting for a device
 * to become ready.
 */
#define HPSA_TUR_RETRY_LIMIT (20)
#define HPSA_MAX_WAIT_INTERVAL_SECS (30)

/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
 * to become ready, in seconds, before giving up on it.
 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS * is how long to wait
 * between polling the board to see if it is ready, in
 * milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and
 * HPSA_BOARD_READY_ITERATIONS are derived from those.
 */
#define HPSA_BOARD_READY_WAIT_SECS (120)
#define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_BOARD_NOT_READY_ITERATIONS \
	((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)

/* Defining the different access_methods */
/*
 * Memory mapped FIFO interface (SMART 53xx cards)
 */
#define SA5_DOORBELL	0x20
#define SA5_REQUEST_PORT_OFFSET	0x40
#define SA5_REPLY_INTR_MASK_OFFSET	0x34
#define SA5_REPLY_PORT_OFFSET		0x44
#define SA5_INTR_STATUS		0x30
#define SA5_SCRATCHPAD_OFFSET	0xB0

#define SA5_CTCFG_OFFSET	0xB4
#define SA5_CTMEM_OFFSET	0xB8

#define SA5_INTR_OFF		0x08
#define SA5B_INTR_OFF		0x04
#define SA5_INTR_PENDING	0x08
#define SA5B_INTR_PENDING	0x04
#define FIFO_EMPTY		0xffffffff
#define HPSA_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */
Cameron #define HPSA_ERROR_BIT 0x02 322edd16368SStephen M. Cameron 323303932fdSDon Brace /* Performant mode flags */ 324303932fdSDon Brace #define SA5_PERF_INTR_PENDING 0x04 325303932fdSDon Brace #define SA5_PERF_INTR_OFF 0x05 326303932fdSDon Brace #define SA5_OUTDB_STATUS_PERF_BIT 0x01 327303932fdSDon Brace #define SA5_OUTDB_CLEAR_PERF_BIT 0x01 328303932fdSDon Brace #define SA5_OUTDB_CLEAR 0xA0 329303932fdSDon Brace #define SA5_OUTDB_CLEAR_PERF_BIT 0x01 330303932fdSDon Brace #define SA5_OUTDB_STATUS 0x9C 331303932fdSDon Brace 332303932fdSDon Brace 333edd16368SStephen M. Cameron #define HPSA_INTR_ON 1 334edd16368SStephen M. Cameron #define HPSA_INTR_OFF 0 335b66cc250SMike Miller 336b66cc250SMike Miller /* 337b66cc250SMike Miller * Inbound Post Queue offsets for IO Accelerator Mode 2 338b66cc250SMike Miller */ 339b66cc250SMike Miller #define IOACCEL2_INBOUND_POSTQ_32 0x48 340b66cc250SMike Miller #define IOACCEL2_INBOUND_POSTQ_64_LOW 0xd0 341b66cc250SMike Miller #define IOACCEL2_INBOUND_POSTQ_64_HI 0xd4 342b66cc250SMike Miller 343edd16368SStephen M. Cameron /* 344edd16368SStephen M. Cameron Send the command to the hardware 345edd16368SStephen M. Cameron */ 346edd16368SStephen M. Cameron static void SA5_submit_command(struct ctlr_info *h, 347edd16368SStephen M. Cameron struct CommandList *c) 348edd16368SStephen M. Cameron { 349303932fdSDon Brace dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr, 350303932fdSDon Brace c->Header.Tag.lower); 351edd16368SStephen M. Cameron writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); 352fec62c36SStephen M. Cameron (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); 353edd16368SStephen M. Cameron } 354edd16368SStephen M. 
/*
 * Submit a command when the controller is in ioaccel mode 2: ioaccel2
 * commands go to the dedicated inbound post queue, everything else to
 * the normal request FIFO.  The trailing readl flushes the posted write.
 */
static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
	struct CommandList *c)
{
	dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
		c->Header.Tag.lower);
	if (c->cmd_type == CMD_IOACCEL2)
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
	else
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}

/*
 *  This card is the opposite of the other cards.
 *   0 turns interrupts on...
 *   0x08 turns them off...
 * Each writel is followed by a readl of the same register to flush the
 * posted write before returning.
 */
static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

/*
 * Performant-mode variant of SA5_intr_mask: masking uses
 * SA5_PERF_INTR_OFF instead of SA5_INTR_OFF.
 */
static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* turn on interrupts */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else {
		h->interrupts_enabled = 0;
		writel(SA5_PERF_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

/*
 * Pull the next completion off reply queue q in performant mode.
 * An entry is valid when its low "toggle" bit matches rq->wraparound;
 * the toggle flips each time the ring wraps.  Returns the raw reply
 * queue entry, or FIFO_EMPTY if nothing is pending.
 */
static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
{
	struct reply_pool *rq = &h->reply_queue[q];
	unsigned long flags, register_value = FIFO_EMPTY;

	/* msi auto clears the interrupt pending bit. */
	if (!(h->msi_vector || h->msix_vector)) {
		/* flush the controller write of the reply queue by reading
		 * outbound doorbell status register.
		 */
		register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
		/* Do a read in order to flush the write to the controller
		 * (as per spec.)
		 */
		register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
	}

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		register_value = rq->head[rq->current_entry];
		rq->current_entry++;
		spin_lock_irqsave(&h->lock, flags);
		h->commands_outstanding--;
		spin_unlock_irqrestore(&h->lock, flags);
	} else {
		register_value = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return register_value;
}

/*
 *  Returns true (1) if fifo is full, i.e. no more commands may be
 *  submitted until some complete.
 */
static unsigned long SA5_fifo_full(struct ctlr_info *h)
{
	if (h->commands_outstanding >= h->max_commands)
		return 1;
	else
		return 0;

}
/*
 *   returns value read from hardware.
 *     returns FIFO_EMPTY if there is nothing to read
 *   (simple-mode reply FIFO; the q argument is unused here)
 */
static unsigned long SA5_completed(struct ctlr_info *h,
	__attribute__((unused)) u8 q)
{
	unsigned long register_value
		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
	unsigned long flags;

	if (register_value != FIFO_EMPTY) {
		spin_lock_irqsave(&h->lock, flags);
		h->commands_outstanding--;
		spin_unlock_irqrestore(&h->lock, flags);
	}

#ifdef HPSA_DEBUG
	if (register_value != FIFO_EMPTY)
		dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
			register_value);
	else
		dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
#endif

	return register_value;
}
/*
 *	Returns true if an interrupt is pending..
 */
static bool SA5_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value  =
		readl(h->vaddr + SA5_INTR_STATUS);
	dev_dbg(&h->pdev->dev, "intr_pending %lx\n", register_value);
	return register_value & SA5_INTR_PENDING;
}
/*
 * Interrupt-pending check for performant mode.  With MSI/MSI-X any
 * nonzero interrupt status means pending; for INTx, also confirm via
 * the outbound doorbell status bit (the read flushes as a side effect).
 */
static bool SA5_performant_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	if (!register_value)
		return false;

	if (h->msi_vector || h->msix_vector)
		return true;

	/* Read outbound doorbell to flush */
	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
}

#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT 0x100

/* Interrupt-pending check for ioaccel mode 1: a dedicated completion
 * bit in the interrupt status register.
 */
static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
		true : false;
}

#define IOACCEL_MODE1_REPLY_QUEUE_INDEX  0x1A0
#define IOACCEL_MODE1_PRODUCER_INDEX     0x1B8
#define IOACCEL_MODE1_CONSUMER_INDEX     0x1BC
#define IOACCEL_MODE1_REPLY_UNUSED       0xFFFFFFFFFFFFFFFFULL

/*
 * Pull the next completion off ioaccel mode 1 reply queue q.  A slot
 * holding IOACCEL_MODE1_REPLY_UNUSED means the queue is empty; consumed
 * slots are marked unused again and the consumer index is written back
 * to the controller.  Returns the reply value, or (truncated)
 * IOACCEL_MODE1_REPLY_UNUSED when nothing was pending.
 */
static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
{
	u64 register_value;
	struct reply_pool *rq = &h->reply_queue[q];
	unsigned long flags;

	BUG_ON(q >= h->nreply_queues);

	register_value = rq->head[rq->current_entry];
	if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
		rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
		if (++rq->current_entry == rq->size)
			rq->current_entry = 0;
		/*
		 * @todo
		 *
		 * Don't really need to write the new index after each command,
		 * but with current driver design this is easiest.
		 */
		wmb();
		writel((q << 24) | rq->current_entry, h->vaddr +
				IOACCEL_MODE1_CONSUMER_INDEX);
		spin_lock_irqsave(&h->lock, flags);
		h->commands_outstanding--;
		spin_unlock_irqrestore(&h->lock, flags);
	}
	return (unsigned long) register_value;
}

/* Simple (FIFO) mode access methods. */
static struct access_method SA5_access = {
	SA5_submit_command,
	SA5_intr_mask,
	SA5_fifo_full,
	SA5_intr_pending,
	SA5_completed,
};
/* I/O accelerator mode 1 access methods. */
static struct access_method SA5_ioaccel_mode1_access = {
	SA5_submit_command,
	SA5_performant_intr_mask,
	SA5_fifo_full,
	SA5_ioaccel_mode1_intr_pending,
	SA5_ioaccel_mode1_completed,
};

/* I/O accelerator mode 2 access methods. */
static struct access_method SA5_ioaccel_mode2_access = {
	SA5_submit_command_ioaccel2,
	SA5_performant_intr_mask,
	SA5_fifo_full,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};

/* Performant mode access methods. */
static struct access_method SA5_performant_access = {
	SA5_submit_command,
	SA5_performant_intr_mask,
	SA5_fifo_full,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};

/* Maps a PCI board id to its product name and access-method table. */
struct board_type {
	u32	board_id;
	char	*product_name;
	struct access_method *access;
};

#endif /* HPSA_H */