xref: /openbmc/linux/drivers/net/wireless/ath/ath10k/pci.h (revision c1d45424)
1 /*
2  * Copyright (c) 2005-2011 Atheros Communications Inc.
3  * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #ifndef _PCI_H_
19 #define _PCI_H_
20 
21 #include <linux/interrupt.h>
22 
23 #include "hw.h"
24 #include "ce.h"
25 
/* FW dump area */
#define REG_DUMP_COUNT_QCA988X 60

/*
 * maximum number of bytes that can be handled atomically by
 * DiagRead/DiagWrite
 */
#define DIAG_TRANSFER_LIMIT 2048
39 
/* Per-request state for a BMI (boot-time messaging) transfer. */
struct bmi_xfer {
	/* signalled when the transfer (and response, if expected) finishes */
	struct completion done;
	/* true if the caller expects a response message from the target */
	bool wait_for_resp;
	/* number of response bytes received */
	u32 resp_len;
};
45 
/*
 * Bookkeeping for one Copy Engine completion event (send or receive),
 * queued for deferred processing.
 */
struct ath10k_pci_compl {
	struct list_head list;	/* entry in a per-pipe or global compl list */
	int send_or_recv;	/* one of HIF_CE_COMPLETE_* below */
	struct ce_state *ce_state;	/* CE that produced the completion */
	struct hif_ce_pipe_info *pipe_info;	/* owning pipe */
	void *transfer_context;	/* opaque caller-supplied context */
	unsigned int nbytes;	/* bytes transferred */
	unsigned int transfer_id;
	unsigned int flags;
};

/* compl_state.send_or_recv */
#define HIF_CE_COMPLETE_FREE 0	/* slot is unused / recycled */
#define HIF_CE_COMPLETE_SEND 1	/* completion of a send operation */
#define HIF_CE_COMPLETE_RECV 2	/* completion of a receive operation */
61 
62 /*
63  * PCI-specific Target state
64  *
65  * NOTE: Structure is shared between Host software and Target firmware!
66  *
67  * Much of this may be of interest to the Host so
68  * HOST_INTEREST->hi_interconnect_state points here
69  * (and all members are 32-bit quantities in order to
70  * facilitate Host access). In particular, Host software is
71  * required to initialize pipe_cfg_addr and svc_to_pipe_map.
72  */
struct pcie_state {
	/* Pipe configuration Target address */
	/* NB: ce_pipe_config[CE_COUNT] */
	u32 pipe_cfg_addr;

	/* Service to pipe map Target address */
	/* NB: service_to_pipe[PIPE_TO_CE_MAP_CN] */
	u32 svc_to_pipe_map;

	/* number of MSI interrupts requested */
	u32 msi_requested;

	/* number of MSI interrupts granted */
	u32 msi_granted;

	/* Message Signalled Interrupt address */
	u32 msi_addr;

	/* Base data */
	u32 msi_data;

	/*
	 * Data for firmware interrupt;
	 * MSI data for other interrupts are
	 * in various SoC registers
	 */
	u32 msi_fw_intr_data;

	/* PCIE_PWR_METHOD_* */
	u32 power_mgmt_method;

	/* PCIE_CONFIG_FLAG_* (see PCIE_CONFIG_FLAG_ENABLE_L1 below) */
	u32 config_flags;
};
107 
/* PCIE_CONFIG_FLAG definitions (for pcie_state.config_flags) */
#define PCIE_CONFIG_FLAG_ENABLE_L1  0x0000001

/* Host software's Copy Engine configuration. */
#define CE_ATTR_FLAGS 0
113 
114 /*
115  * Configuration information for a Copy Engine pipe.
116  * Passed from Host to Target during startup (one per CE).
117  *
118  * NOTE: Structure is shared between Host software and Target firmware!
119  */
struct ce_pipe_config {
	u32 pipenum;	/* CE number this configuration applies to */
	u32 pipedir;	/* PIPEDIR_* direction, see below */
	u32 nentries;	/* number of ring entries */
	u32 nbytes_max;	/* maximum payload size per transfer */
	u32 flags;	/* CE_ATTR_FLAGS */
	u32 reserved;
};
128 
129 /*
130  * Directions for interconnect pipe configuration.
131  * These definitions may be used during configuration and are shared
132  * between Host and Target.
133  *
134  * Pipe Directions are relative to the Host, so PIPEDIR_IN means
135  * "coming IN over air through Target to Host" as with a WiFi Rx operation.
136  * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
137  * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
138  * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
139  * over the interconnect.
140  */
#define PIPEDIR_NONE    0
#define PIPEDIR_IN      1  /* Target-->Host, WiFi Rx direction */
#define PIPEDIR_OUT     2  /* Host->Target, WiFi Tx direction */
#define PIPEDIR_INOUT   3  /* bidirectional */

/* Establish a mapping between a service/direction and a pipe. */
struct service_to_pipe {
	u32 service_id;	/* HTC/WMI service identifier */
	u32 pipedir;	/* PIPEDIR_* direction */
	u32 pipenum;	/* CE pipe carrying this service */
};
152 
/* Feature flags stored in ath10k_pci.features (bit indices). */
enum ath10k_pci_features {
	/* MSI-X style interrupt delivery is available/in use */
	ATH10K_PCI_FEATURE_MSI_X		= 0,
	/* enables the dummy-read register write workaround in
	 * ath10k_pci_write32(); "WARKAROUND" is a typo in the original
	 * identifier, kept because callers reference it by name */
	ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND	= 1,

	/* keep last */
	ATH10K_PCI_FEATURE_COUNT
};
160 
/* Per-pipe state. */
struct hif_ce_pipe_info {
	/* Handle of underlying Copy Engine */
	struct ce_state *ce_hdl;

	/* Our pipe number; facilitates use of pipe_info ptrs. */
	u8 pipe_num;

	/* Convenience back pointer to hif_ce_state. */
	struct ath10k *hif_ce_state;

	/* receive buffer size for this pipe */
	size_t buf_sz;

	/* protects compl_free and num_send_allowed */
	spinlock_t pipe_lock;

	/* List of free CE completion slots */
	struct list_head compl_free;

	/* Limit the number of outstanding send requests. */
	int num_sends_allowed;

	/* back pointer to the owning PCI state */
	struct ath10k_pci *ar_pci;
	/* per-pipe deferred interrupt processing */
	struct tasklet_struct intr;
};
186 
/* Top-level PCI bus state for one ath10k device instance. */
struct ath10k_pci {
	struct pci_dev *pdev;	/* underlying PCI device */
	struct device *dev;	/* generic device handle */
	struct ath10k *ar;	/* back pointer to core driver state */
	void __iomem *mem;	/* mapped BAR (register space) */
	int cacheline_sz;

	/* ATH10K_PCI_FEATURE_* flags, one bit per feature */
	DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);

	/*
	 * Number of MSI interrupts granted, 0 --> using legacy PCI line
	 * interrupts.
	 */
	int num_msi_intrs;

	struct tasklet_struct intr_tq;	/* general interrupt bottom half */
	struct tasklet_struct msi_fw_err;	/* firmware error handling */

	/* Number of Copy Engines supported */
	unsigned int ce_count;

	/* non-zero once the HIF layer has been started */
	int started;

	/* reference count of outstanding "keep target awake" requests */
	atomic_t keep_awake_count;
	bool verified_awake;

	/* List of CE completions to be processed */
	struct list_head compl_process;

	/* protects compl_processing and compl_process */
	spinlock_t compl_lock;

	bool compl_processing;

	struct hif_ce_pipe_info pipe_info[CE_COUNT_MAX];

	struct ath10k_hif_cb msg_callbacks_current;

	/* Target address used to signal a pending firmware event */
	u32 fw_indicator_address;

	/* Copy Engine used for Diagnostic Accesses */
	struct ce_state *ce_diag;

	/* FIXME: document what this really protects */
	spinlock_t ce_lock;

	/* Map CE id to ce_state */
	struct ce_state *ce_id_to_state[CE_COUNT_MAX];

	/* makes sure that dummy reads are atomic */
	spinlock_t hw_v1_workaround_lock;
};
240 
/* Return the PCI bus-private state attached to the core HIF layer. */
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
{
	return ar->hif.priv;
}
245 
/*
 * Read a 32-bit register in the PCIe-local register block.
 * @addr is an offset relative to PCIE_LOCAL_BASE_ADDRESS.
 */
static inline u32 ath10k_pci_reg_read32(void __iomem *mem, u32 addr)
{
	return ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
250 
/*
 * Write a 32-bit register in the PCIe-local register block.
 * @addr is an offset relative to PCIE_LOCAL_BASE_ADDRESS.
 */
static inline void ath10k_pci_reg_write32(void __iomem *mem, u32 addr, u32 val)
{
	iowrite32(val, mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
255 
#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
#define PCIE_WAKE_TIMEOUT 5000	/* 5ms */

/* PCI BAR index of the register space we map */
#define BAR_NUM 0

/* CDC workaround constants; exact semantics defined by their users in pci.c
 * -- NOTE(review): not documented here, verify against the .c file */
#define CDC_WAR_MAGIC_STR   0xceef0000
#define CDC_WAR_DATA_CE     4
263 
264 /*
265  * TODO: Should be a function call specific to each Target-type.
266  * This convoluted macro converts from Target CPU Virtual Address Space to CE
267  * Address Space. As part of this process, we conservatively fetch the current
268  * PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
269  * for this device; but that's not guaranteed.
270  */
/* NOTE(review): the 'ar' parameter is never referenced by the expansion;
 * it is kept for call-site uniformity / a future per-target function. */
#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr)			\
	(((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS|			\
	  CORE_CTRL_ADDRESS)) & 0x7ff) << 21) |				\
	 0x100000 | ((addr) & 0xfffff))

/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10
278 
279 /*
280  * This API allows the Host to access Target registers directly
281  * and relatively efficiently over PCIe.
282  * This allows the Host to avoid extra overhead associated with
283  * sending a message to firmware and waiting for a response message
284  * from firmware, as is done on other interconnects.
285  *
286  * Yet there is some complexity with direct accesses because the
287  * Target's power state is not known a priori. The Host must issue
288  * special PCIe reads/writes in order to explicitly wake the Target
289  * and to verify that it is awake and will remain awake.
290  *
291  * Usage:
292  *
293  *   Use ath10k_pci_read32 and ath10k_pci_write32 to access Target space.
294  *   These calls must be bracketed by ath10k_pci_wake and
295  *   ath10k_pci_sleep.  A single BEGIN/END pair is adequate for
296  *   multiple READ/WRITE operations.
297  *
298  *   Use ath10k_pci_wake to put the Target in a state in
299  *   which it is legal for the Host to directly access it. This
300  *   may involve waking the Target from a low power state, which
301  *   may take up to 2Ms!
302  *
303  *   Use ath10k_pci_sleep to tell the Target that as far as
304  *   this code path is concerned, it no longer needs to remain
305  *   directly accessible.  BEGIN/END is under a reference counter;
306  *   multiple code paths may issue BEGIN/END on a single targid.
307  */
308 static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
309 				      u32 value)
310 {
311 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
312 	void __iomem *addr = ar_pci->mem;
313 
314 	if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) {
315 		unsigned long irq_flags;
316 
317 		spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags);
318 
319 		ioread32(addr+offset+4); /* 3rd read prior to write */
320 		ioread32(addr+offset+4); /* 2nd read prior to write */
321 		ioread32(addr+offset+4); /* 1st read prior to write */
322 		iowrite32(value, addr+offset);
323 
324 		spin_unlock_irqrestore(&ar_pci->hw_v1_workaround_lock,
325 				       irq_flags);
326 	} else {
327 		iowrite32(value, addr+offset);
328 	}
329 }
330 
/* Read a 32-bit value from Target register space at @offset. */
static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	return ioread32(ar_pci->mem + offset);
}
337 
/* Module parameter: non-zero enables target power-save handling (defined
 * in the corresponding .c file). */
extern unsigned int ath10k_target_ps;

void ath10k_do_pci_wake(struct ath10k *ar);
void ath10k_do_pci_sleep(struct ath10k *ar);
342 
/*
 * Wake the Target before direct register access; no-op when target
 * power-save is disabled. Must be paired with ath10k_pci_sleep().
 */
static inline void ath10k_pci_wake(struct ath10k *ar)
{
	if (ath10k_target_ps)
		ath10k_do_pci_wake(ar);
}
348 
/*
 * Release the wake reference taken by ath10k_pci_wake(); no-op when
 * target power-save is disabled.
 */
static inline void ath10k_pci_sleep(struct ath10k *ar)
{
	if (ath10k_target_ps)
		ath10k_do_pci_sleep(ar);
}
354 
355 #endif /* _PCI_H_ */
356