/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _PCI_H_
#define _PCI_H_

#include <linux/interrupt.h>

#include "hw.h"
#include "ce.h"

/* FW dump area */
#define REG_DUMP_COUNT_QCA988X 60

/*
 * maximum number of bytes that can be
 * handled atomically by DiagRead/DiagWrite
 */
#define DIAG_TRANSFER_LIMIT 2048

struct bmi_xfer {
        struct completion done;
        bool wait_for_resp;
        u32 resp_len;
};

enum ath10k_pci_compl_state {
        ATH10K_PCI_COMPL_FREE = 0,
        ATH10K_PCI_COMPL_SEND,
        ATH10K_PCI_COMPL_RECV,
};

struct ath10k_pci_compl {
        struct list_head list;
        enum ath10k_pci_compl_state state;
        struct ath10k_ce_pipe *ce_state;
        struct ath10k_pci_pipe *pipe_info;
        struct sk_buff *skb;
        unsigned int nbytes;
        unsigned int transfer_id;
        unsigned int flags;
};

/*
 * PCI-specific Target state
 *
 * NOTE: Structure is shared between Host software and Target firmware!
 *
 * Much of this may be of interest to the Host so
 * HOST_INTEREST->hi_interconnect_state points here
 * (and all members are 32-bit quantities in order to
 * facilitate Host access). In particular, Host software is
 * required to initialize pipe_cfg_addr and svc_to_pipe_map.
 */
struct pcie_state {
        /* Pipe configuration Target address */
        /* NB: ce_pipe_config[CE_COUNT] */
        u32 pipe_cfg_addr;

        /* Service to pipe map Target address */
        /* NB: service_to_pipe[PIPE_TO_CE_MAP_CN] */
        u32 svc_to_pipe_map;

        /* number of MSI interrupts requested */
        u32 msi_requested;

        /* number of MSI interrupts granted */
        u32 msi_granted;

        /* Message Signalled Interrupt address */
        u32 msi_addr;

        /* Base data */
        u32 msi_data;

        /*
         * Data for firmware interrupt;
         * MSI data for other interrupts are
         * in various SoC registers
         */
        u32 msi_fw_intr_data;

        /* PCIE_PWR_METHOD_* */
        u32 power_mgmt_method;

        /* PCIE_CONFIG_FLAG_* */
        u32 config_flags;
};
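/*
 * Illustrative sketch, not part of the original file: the two members the
 * Host is required to initialize before the structure becomes visible to
 * the Target.  The helper and its parameter names are hypothetical; the
 * real driver writes these values through its Diagnostic access path.
 */
static inline void ath10k_pci_example_init_pcie_state(struct pcie_state *state,
                                                      u32 pipe_cfg_targ_addr,
                                                      u32 svc_map_targ_addr)
{
        /* Target addresses of ce_pipe_config[] and service_to_pipe[] */
        state->pipe_cfg_addr = pipe_cfg_targ_addr;
        state->svc_to_pipe_map = svc_map_targ_addr;
}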
/* PCIE_CONFIG_FLAG definitions */
#define PCIE_CONFIG_FLAG_ENABLE_L1  0x0000001

/* Host software's Copy Engine configuration. */
#define CE_ATTR_FLAGS 0

/*
 * Configuration information for a Copy Engine pipe.
 * Passed from Host to Target during startup (one per CE).
 *
 * NOTE: Structure is shared between Host software and Target firmware!
 */
struct ce_pipe_config {
        u32 pipenum;
        u32 pipedir;
        u32 nentries;
        u32 nbytes_max;
        u32 flags;
        u32 reserved;
};

/*
 * Directions for interconnect pipe configuration.
 * These definitions may be used during configuration and are shared
 * between Host and Target.
 *
 * Pipe Directions are relative to the Host, so PIPEDIR_IN means
 * "coming IN over air through Target to Host" as with a WiFi Rx operation.
 * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
 * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
 * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
 * over the interconnect.
 */
#define PIPEDIR_NONE    0
#define PIPEDIR_IN      1  /* Target-->Host, WiFi Rx direction */
#define PIPEDIR_OUT     2  /* Host->Target, WiFi Tx direction */
#define PIPEDIR_INOUT   3  /* bidirectional */

/* Establish a mapping between a service/direction and a pipe. */
struct service_to_pipe {
        u32 service_id;
        u32 pipedir;
        u32 pipenum;
};
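/*
 * Illustrative sketch, not part of the original file: what a pair of service
 * map entries might look like.  The service id 0x100 and the pipe numbers
 * are hypothetical placeholders; real ids come from the HTC service
 * definitions and the per-target pipe configuration.
 */
static const struct service_to_pipe example_svc_to_pipe_map[] __maybe_unused = {
        {
                .service_id = 0x100,            /* hypothetical service id */
                .pipedir = PIPEDIR_OUT,         /* Host->Target (Tx) */
                .pipenum = 4,
        },
        {
                .service_id = 0x100,
                .pipedir = PIPEDIR_IN,          /* Target->Host (Rx) */
                .pipenum = 1,
        },
};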
enum ath10k_pci_features {
        ATH10K_PCI_FEATURE_MSI_X = 0,
        ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 1,

        /* keep last */
        ATH10K_PCI_FEATURE_COUNT
};

/* Per-pipe state. */
struct ath10k_pci_pipe {
        /* Handle of underlying Copy Engine */
        struct ath10k_ce_pipe *ce_hdl;

        /* Our pipe number; facilitates use of pipe_info ptrs. */
        u8 pipe_num;

        /* Convenience back pointer to hif_ce_state. */
        struct ath10k *hif_ce_state;

        size_t buf_sz;

        /* protects compl_free and num_send_allowed */
        spinlock_t pipe_lock;

        /* List of free CE completion slots */
        struct list_head compl_free;

        struct ath10k_pci *ar_pci;
        struct tasklet_struct intr;
};

struct ath10k_pci {
        struct pci_dev *pdev;
        struct device *dev;
        struct ath10k *ar;
        void __iomem *mem;

        DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);

        /*
         * Number of MSI interrupts granted, 0 --> using legacy PCI line
         * interrupts.
         */
        int num_msi_intrs;

        struct tasklet_struct intr_tq;
        struct tasklet_struct msi_fw_err;

        /* Number of Copy Engines supported */
        unsigned int ce_count;

        int started;

        atomic_t keep_awake_count;
        bool verified_awake;

        /* List of CE completions to be processed */
        struct list_head compl_process;

        /* protects compl_processing and compl_process */
        spinlock_t compl_lock;

        bool compl_processing;

        struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];

        struct ath10k_hif_cb msg_callbacks_current;

        /* Target address used to signal a pending firmware event */
        u32 fw_indicator_address;

        /* Copy Engine used for Diagnostic Accesses */
        struct ath10k_ce_pipe *ce_diag;

        /* FIXME: document what this really protects */
        spinlock_t ce_lock;

        /* Map CE id to ce_state */
        struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
};

static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
{
        return ar->hif.priv;
}

static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}

static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}

#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
#define PCIE_WAKE_TIMEOUT 5000 /* 5ms */

#define BAR_NUM 0

#define CDC_WAR_MAGIC_STR   0xceef0000
#define CDC_WAR_DATA_CE     4

/*
 * TODO: Should be a function call specific to each Target-type.
 * This convoluted macro converts from Target CPU Virtual Address Space to CE
 * Address Space. As part of this process, we conservatively fetch the current
 * PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
 * for this device; but that's not guaranteed.
 */
#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \
        (((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS| \
          CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \
         0x100000 | ((addr) & 0xfffff))

/* Wait up to this many ms for a Diagnostic Access CE operation to complete */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10

/*
 * This API allows the Host to access Target registers directly
 * and relatively efficiently over PCIe.
 * This allows the Host to avoid extra overhead associated with
 * sending a message to firmware and waiting for a response message
 * from firmware, as is done on other interconnects.
 *
 * Yet there is some complexity with direct accesses because the
 * Target's power state is not known a priori. The Host must issue
 * special PCIe reads/writes in order to explicitly wake the Target
 * and to verify that it is awake and will remain awake.
 *
 * Usage:
 *
 * Use ath10k_pci_read32 and ath10k_pci_write32 to access Target space.
 * These calls must be bracketed by ath10k_pci_wake and
 * ath10k_pci_sleep. A single wake/sleep pair is adequate for
 * multiple READ/WRITE operations.
 *
 * Use ath10k_pci_wake to put the Target in a state in
 * which it is legal for the Host to directly access it. This
 * may involve waking the Target from a low power state, which
 * may take up to 2 ms!
 *
 * Use ath10k_pci_sleep to tell the Target that as far as
 * this code path is concerned, it no longer needs to remain
 * directly accessible. Wake/sleep is reference counted, so
 * multiple code paths may issue wake/sleep against a single device.
 */
static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
                                      u32 value)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        iowrite32(value, ar_pci->mem + offset);
}

static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        return ioread32(ar_pci->mem + offset);
}

int ath10k_do_pci_wake(struct ath10k *ar);
void ath10k_do_pci_sleep(struct ath10k *ar);

static inline int ath10k_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
                return ath10k_do_pci_wake(ar);

        return 0;
}

static inline void ath10k_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
                ath10k_do_pci_sleep(ar);
}
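/*
 * Illustrative sketch, not part of the original file: the bracketing pattern
 * described in the usage comment above.  One wake/sleep pair covers any
 * number of register accesses in between; the register offset and the bit
 * being set here are hypothetical placeholders.
 */
static inline int ath10k_pci_example_read_modify_write(struct ath10k *ar,
                                                       u32 offset)
{
        u32 val;
        int ret;

        ret = ath10k_pci_wake(ar);      /* make sure the Target stays awake */
        if (ret)
                return ret;

        val = ath10k_pci_read32(ar, offset);
        ath10k_pci_write32(ar, offset, val | 0x1);

        ath10k_pci_sleep(ar);           /* drop this code path's wake reference */
        return 0;
}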
#endif /* _PCI_H_ */