/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _PCI_H_
#define _PCI_H_

#include <linux/interrupt.h>

#include "hw.h"
#include "ce.h"

/* FW dump area */
#define REG_DUMP_COUNT_QCA988X 60

/*
 * Maximum number of bytes that can be handled atomically by
 * DiagRead/DiagWrite.
 */
#define DIAG_TRANSFER_LIMIT 2048

struct bmi_xfer {
        struct completion done;
        bool wait_for_resp;
        u32 resp_len;
};

/*
 * PCI-specific Target state
 *
 * NOTE: Structure is shared between Host software and Target firmware!
 *
 * Much of this may be of interest to the Host, so
 * HOST_INTEREST->hi_interconnect_state points here
 * (and all members are 32-bit quantities in order to
 * facilitate Host access). In particular, Host software is
 * required to initialize pipe_cfg_addr and svc_to_pipe_map.
 */
struct pcie_state {
        /* Pipe configuration Target address */
        /* NB: ce_pipe_config[CE_COUNT] */
        u32 pipe_cfg_addr;

        /* Service to pipe map Target address */
        /* NB: service_to_pipe[PIPE_TO_CE_MAP_CN] */
        u32 svc_to_pipe_map;

        /* number of MSI interrupts requested */
        u32 msi_requested;

        /* number of MSI interrupts granted */
        u32 msi_granted;

        /* Message Signaled Interrupt address */
        u32 msi_addr;

        /* Base data */
        u32 msi_data;

        /*
         * Data for firmware interrupt;
         * MSI data for other interrupts are
         * in various SoC registers
         */
        u32 msi_fw_intr_data;

        /* PCIE_PWR_METHOD_* */
        u32 power_mgmt_method;

        /* PCIE_CONFIG_FLAG_* */
        u32 config_flags;
};

/* PCIE_CONFIG_FLAG definitions */
#define PCIE_CONFIG_FLAG_ENABLE_L1 0x0000001

/* Host software's Copy Engine configuration. */
#define CE_ATTR_FLAGS 0

/*
 * Configuration information for a Copy Engine pipe.
 * Passed from Host to Target during startup (one per CE).
 *
 * NOTE: Structure is shared between Host software and Target firmware!
 */
struct ce_pipe_config {
        u32 pipenum;
        u32 pipedir;
        u32 nentries;
        u32 nbytes_max;
        u32 flags;
        u32 reserved;
};

/*
 * Directions for interconnect pipe configuration.
 * These definitions may be used during configuration and are shared
 * between Host and Target.
 *
 * Pipe Directions are relative to the Host, so PIPEDIR_IN means
 * "coming IN over air through Target to Host" as with a WiFi Rx operation.
 * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
 * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
 * Target, since things that are "PIPEDIR_OUT" are coming IN to the Target
 * over the interconnect.
 */
#define PIPEDIR_NONE    0
#define PIPEDIR_IN      1 /* Target-->Host, WiFi Rx direction */
#define PIPEDIR_OUT     2 /* Host-->Target, WiFi Tx direction */
#define PIPEDIR_INOUT   3 /* bidirectional */

/* Establish a mapping between a service/direction and a pipe. */
struct service_to_pipe {
        u32 service_id;
        u32 pipedir;
        u32 pipenum;
};
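
/*
 * Illustrative sketch (not part of the driver): the Target addresses held in
 * pcie_state.pipe_cfg_addr and pcie_state.svc_to_pipe_map are expected to
 * point at arrays of the two structures above, which the Host provides during
 * startup. The table names and entry values below are hypothetical and only
 * show the shape of such tables:
 *
 *	static const struct ce_pipe_config example_pipe_cfg[] = {
 *		{
 *			.pipenum    = 1,
 *			.pipedir    = PIPEDIR_IN,	// Target-->Host (WiFi Rx)
 *			.nentries   = 32,
 *			.nbytes_max = 2048,
 *			.flags      = CE_ATTR_FLAGS,
 *			.reserved   = 0,
 *		},
 *	};
 *
 *	static const struct service_to_pipe example_svc_map[] = {
 *		{
 *			.service_id = 0x100,		// hypothetical service id
 *			.pipedir    = PIPEDIR_OUT,	// Host-->Target (WiFi Tx)
 *			.pipenum    = 4,
 *		},
 *	};
 */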

enum ath10k_pci_features {
        ATH10K_PCI_FEATURE_MSI_X = 0,
        ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 1,

        /* keep last */
        ATH10K_PCI_FEATURE_COUNT
};

/* Per-pipe state. */
struct ath10k_pci_pipe {
        /* Handle of underlying Copy Engine */
        struct ath10k_ce_pipe *ce_hdl;

        /* Our pipe number; facilitates use of pipe_info ptrs. */
        u8 pipe_num;

        /* Convenience back pointer to hif_ce_state. */
        struct ath10k *hif_ce_state;

        size_t buf_sz;

        /* protects compl_free and num_send_allowed */
        spinlock_t pipe_lock;

        struct ath10k_pci *ar_pci;
        struct tasklet_struct intr;
};

struct ath10k_pci {
        struct pci_dev *pdev;
        struct device *dev;
        struct ath10k *ar;
        void __iomem *mem;

        DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);

        /*
         * Number of MSI interrupts granted; 0 --> using legacy PCI line
         * interrupts.
         */
        int num_msi_intrs;

        struct tasklet_struct intr_tq;
        struct tasklet_struct msi_fw_err;
        struct tasklet_struct early_irq_tasklet;

        int started;

        atomic_t keep_awake_count;
        bool verified_awake;

        struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];

        struct ath10k_hif_cb msg_callbacks_current;

        /* Copy Engine used for Diagnostic Accesses */
        struct ath10k_ce_pipe *ce_diag;

        /* FIXME: document what this really protects */
        spinlock_t ce_lock;

        /* Map CE id to ce_state */
        struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
};

static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
{
        return ar->hif.priv;
}

static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}

static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}

#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
#define PCIE_WAKE_TIMEOUT 5000 /* 5 ms */

#define BAR_NUM 0

#define CDC_WAR_MAGIC_STR 0xceef0000
#define CDC_WAR_DATA_CE 4

/*
 * TODO: Should be a function call specific to each Target-type.
 * This convoluted macro converts from Target CPU Virtual Address Space to CE
 * Address Space. As part of this process, we conservatively fetch the current
 * PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
 * for this device, but that's not guaranteed.
 */
#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \
        (((ioread32((pci_addr) + (SOC_CORE_BASE_ADDRESS | \
           CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \
         0x100000 | ((addr) & 0xfffff))
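
/*
 * Minimal sketch of how the conversion above is meant to be used, e.g. by a
 * diagnostic access path that must hand a CE-space address to the Copy
 * Engine. The helper name is hypothetical and exists only for illustration.
 */
static inline u32 ath10k_pci_targ_cpu_to_ce_addr_example(struct ath10k *ar,
                                                         u32 targ_cpu_addr)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        /* ar_pci->mem is the PCI BAR mapping set up at probe time */
        return TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, targ_cpu_addr);
}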

/* Wait up to this many ms for a Diagnostic Access CE operation to complete. */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10

/*
 * This API allows the Host to access Target registers directly
 * and relatively efficiently over PCIe. It avoids the overhead of
 * sending a message to firmware and waiting for a response message
 * from firmware, as is done on other interconnects.
 *
 * Yet there is some complexity with direct accesses because the
 * Target's power state is not known a priori. The Host must issue
 * special PCIe reads/writes in order to explicitly wake the Target
 * and to verify that it is awake and will remain awake.
 *
 * Usage:
 *
 *   Use ath10k_pci_read32 and ath10k_pci_write32 to access Target space.
 *   These calls must be bracketed by ath10k_pci_wake and ath10k_pci_sleep;
 *   a single wake/sleep pair is adequate for multiple read/write operations.
 *
 *   Use ath10k_pci_wake to put the Target in a state in which it is legal
 *   for the Host to directly access it. This may involve waking the Target
 *   from a low power state, which may take up to 2 ms.
 *
 *   Use ath10k_pci_sleep to tell the Target that, as far as this code path
 *   is concerned, it no longer needs to remain directly accessible. The
 *   wake/sleep calls are reference counted, so multiple code paths may
 *   bracket their accesses independently on the same device.
 */
static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
                                      u32 value)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        iowrite32(value, ar_pci->mem + offset);
}

static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        return ioread32(ar_pci->mem + offset);
}

static inline u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
        return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}

static inline void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
}

int ath10k_do_pci_wake(struct ath10k *ar);
void ath10k_do_pci_sleep(struct ath10k *ar);

static inline int ath10k_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
                return ath10k_do_pci_wake(ar);

        return 0;
}

static inline void ath10k_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
                ath10k_do_pci_sleep(ar);
}

#endif /* _PCI_H_ */
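
/*
 * Illustrative sketch of the bracketed direct-access pattern described in the
 * usage comment above (not driver code; the helper name is hypothetical):
 *
 *	static int example_access(struct ath10k *ar, u32 offset, u32 val)
 *	{
 *		int ret;
 *
 *		ret = ath10k_pci_wake(ar);	// wake Target, reference counted
 *		if (ret)
 *			return ret;
 *
 *		// any number of reads/writes may share one wake/sleep pair
 *		ath10k_pci_write32(ar, offset, val);
 *		(void)ath10k_pci_read32(ar, offset);
 *
 *		ath10k_pci_sleep(ar);		// drop this path's wake reference
 *		return 0;
 *	}
 */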