xref: /openbmc/linux/drivers/net/wireless/ath/ath10k/pci.c (revision e2f1cf25)
1 /*
2  * Copyright (c) 2005-2011 Atheros Communications Inc.
3  * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <linux/pci.h>
19 #include <linux/module.h>
20 #include <linux/interrupt.h>
21 #include <linux/spinlock.h>
22 #include <linux/bitops.h>
23 
24 #include "core.h"
25 #include "debug.h"
26 
27 #include "targaddrs.h"
28 #include "bmi.h"
29 
30 #include "hif.h"
31 #include "htc.h"
32 
33 #include "ce.h"
34 #include "pci.h"
35 
36 enum ath10k_pci_irq_mode {
37 	ATH10K_PCI_IRQ_AUTO = 0,
38 	ATH10K_PCI_IRQ_LEGACY = 1,
39 	ATH10K_PCI_IRQ_MSI = 2,
40 };
41 
42 enum ath10k_pci_reset_mode {
43 	ATH10K_PCI_RESET_AUTO = 0,
44 	ATH10K_PCI_RESET_WARM_ONLY = 1,
45 };
46 
47 static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
48 static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
49 
50 module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
51 MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
52 
53 module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
54 MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
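/* For example, "modprobe ath10k_pci irq_mode=2" forces MSI and "reset_mode=1"
 * restricts the driver to warm resets; both parameters are also visible under
 * /sys/module/ath10k_pci/parameters/ since they are registered with 0644.
 */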
55 
56 /* how long to wait for the target to initialise, in ms */
57 #define ATH10K_PCI_TARGET_WAIT 3000
58 #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
59 
60 #define QCA988X_2_0_DEVICE_ID	(0x003c)
61 #define QCA6174_2_1_DEVICE_ID	(0x003e)
62 
63 static const struct pci_device_id ath10k_pci_id_table[] = {
64 	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
65 	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
66 	{0}
67 };
68 
69 static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
70 	/* QCA988X pre 2.0 chips are not supported because they need some nasty
71 	 * hacks which ath10k doesn't implement; without them these devices
72 	 * crash horribly.
73 	 */
74 	{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
75 	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
76 	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
77 	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
78 	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
79 	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
80 };
81 
82 static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
83 static int ath10k_pci_cold_reset(struct ath10k *ar);
84 static int ath10k_pci_warm_reset(struct ath10k *ar);
85 static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
86 static int ath10k_pci_init_irq(struct ath10k *ar);
87 static int ath10k_pci_deinit_irq(struct ath10k *ar);
88 static int ath10k_pci_request_irq(struct ath10k *ar);
89 static void ath10k_pci_free_irq(struct ath10k *ar);
90 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
91 			       struct ath10k_ce_pipe *rx_pipe,
92 			       struct bmi_xfer *xfer);
93 
94 static const struct ce_attr host_ce_config_wlan[] = {
95 	/* CE0: host->target HTC control and raw streams */
96 	{
97 		.flags = CE_ATTR_FLAGS,
98 		.src_nentries = 16,
99 		.src_sz_max = 256,
100 		.dest_nentries = 0,
101 	},
102 
103 	/* CE1: target->host HTT + HTC control */
104 	{
105 		.flags = CE_ATTR_FLAGS,
106 		.src_nentries = 0,
107 		.src_sz_max = 2048,
108 		.dest_nentries = 512,
109 	},
110 
111 	/* CE2: target->host WMI */
112 	{
113 		.flags = CE_ATTR_FLAGS,
114 		.src_nentries = 0,
115 		.src_sz_max = 2048,
116 		.dest_nentries = 128,
117 	},
118 
119 	/* CE3: host->target WMI */
120 	{
121 		.flags = CE_ATTR_FLAGS,
122 		.src_nentries = 32,
123 		.src_sz_max = 2048,
124 		.dest_nentries = 0,
125 	},
126 
127 	/* CE4: host->target HTT */
128 	{
129 		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
130 		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
131 		.src_sz_max = 256,
132 		.dest_nentries = 0,
133 	},
134 
135 	/* CE5: unused */
136 	{
137 		.flags = CE_ATTR_FLAGS,
138 		.src_nentries = 0,
139 		.src_sz_max = 0,
140 		.dest_nentries = 0,
141 	},
142 
143 	/* CE6: target autonomous hif_memcpy */
144 	{
145 		.flags = CE_ATTR_FLAGS,
146 		.src_nentries = 0,
147 		.src_sz_max = 0,
148 		.dest_nentries = 0,
149 	},
150 
151 	/* CE7: ce_diag, the Diagnostic Window */
152 	{
153 		.flags = CE_ATTR_FLAGS,
154 		.src_nentries = 2,
155 		.src_sz_max = DIAG_TRANSFER_LIMIT,
156 		.dest_nentries = 2,
157 	},
158 };
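
/* The target-side view of these pipes follows in target_ce_config_wlan below;
 * the two tables describe the same copy engines and must be kept consistent
 * (CE7, the diagnostic window, is configured on the host side only).
 */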
159 
160 /* Target firmware's Copy Engine configuration. */
161 static const struct ce_pipe_config target_ce_config_wlan[] = {
162 	/* CE0: host->target HTC control and raw streams */
163 	{
164 		.pipenum = __cpu_to_le32(0),
165 		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
166 		.nentries = __cpu_to_le32(32),
167 		.nbytes_max = __cpu_to_le32(256),
168 		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
169 		.reserved = __cpu_to_le32(0),
170 	},
171 
172 	/* CE1: target->host HTT + HTC control */
173 	{
174 		.pipenum = __cpu_to_le32(1),
175 		.pipedir = __cpu_to_le32(PIPEDIR_IN),
176 		.nentries = __cpu_to_le32(32),
177 		.nbytes_max = __cpu_to_le32(2048),
178 		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
179 		.reserved = __cpu_to_le32(0),
180 	},
181 
182 	/* CE2: target->host WMI */
183 	{
184 		.pipenum = __cpu_to_le32(2),
185 		.pipedir = __cpu_to_le32(PIPEDIR_IN),
186 		.nentries = __cpu_to_le32(64),
187 		.nbytes_max = __cpu_to_le32(2048),
188 		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
189 		.reserved = __cpu_to_le32(0),
190 	},
191 
192 	/* CE3: host->target WMI */
193 	{
194 		.pipenum = __cpu_to_le32(3),
195 		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
196 		.nentries = __cpu_to_le32(32),
197 		.nbytes_max = __cpu_to_le32(2048),
198 		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
199 		.reserved = __cpu_to_le32(0),
200 	},
201 
202 	/* CE4: host->target HTT */
203 	{
204 		.pipenum = __cpu_to_le32(4),
205 		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
206 		.nentries = __cpu_to_le32(256),
207 		.nbytes_max = __cpu_to_le32(256),
208 		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
209 		.reserved = __cpu_to_le32(0),
210 	},
211 
212 	/* NB: 50% of src nentries, since tx has 2 frags */
213 
214 	/* CE5: unused */
215 	{
216 		.pipenum = __cpu_to_le32(5),
217 		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
218 		.nentries = __cpu_to_le32(32),
219 		.nbytes_max = __cpu_to_le32(2048),
220 		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
221 		.reserved = __cpu_to_le32(0),
222 	},
223 
224 	/* CE6: Reserved for target autonomous hif_memcpy */
225 	{
226 		.pipenum = __cpu_to_le32(6),
227 		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
228 		.nentries = __cpu_to_le32(32),
229 		.nbytes_max = __cpu_to_le32(4096),
230 		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
231 		.reserved = __cpu_to_le32(0),
232 	},
233 
234 	/* CE7 used only by Host */
235 };
236 
237 /*
238  * Map from service/endpoint to Copy Engine.
239  * This table is derived from the CE_PCI TABLE, above.
240  * It is passed to the Target at startup for use by firmware.
241  */
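/* ath10k_pci_hif_map_service_to_pipe() further down walks this table to
 * resolve the UL/DL pipe numbers for a given HTC service id.
 */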
242 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
243 	{
244 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
245 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
246 		__cpu_to_le32(3),
247 	},
248 	{
249 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
250 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
251 		__cpu_to_le32(2),
252 	},
253 	{
254 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
255 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
256 		__cpu_to_le32(3),
257 	},
258 	{
259 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
260 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
261 		__cpu_to_le32(2),
262 	},
263 	{
264 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
265 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
266 		__cpu_to_le32(3),
267 	},
268 	{
269 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
270 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
271 		__cpu_to_le32(2),
272 	},
273 	{
274 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
275 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
276 		__cpu_to_le32(3),
277 	},
278 	{
279 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
280 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
281 		__cpu_to_le32(2),
282 	},
283 	{
284 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
285 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
286 		__cpu_to_le32(3),
287 	},
288 	{
289 		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
290 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
291 		__cpu_to_le32(2),
292 	},
293 	{
294 		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
295 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
296 		__cpu_to_le32(0),
297 	},
298 	{
299 		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
300 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
301 		__cpu_to_le32(1),
302 	},
303 	{ /* not used */
304 		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
305 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
306 		__cpu_to_le32(0),
307 	},
308 	{ /* not used */
309 		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
310 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
311 		__cpu_to_le32(1),
312 	},
313 	{
314 		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
315 		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
316 		__cpu_to_le32(4),
317 	},
318 	{
319 		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
320 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
321 		__cpu_to_le32(1),
322 	},
323 
324 	/* (Additions here) */
325 
326 	{ /* must be last */
327 		__cpu_to_le32(0),
328 		__cpu_to_le32(0),
329 		__cpu_to_le32(0),
330 	},
331 };
332 
333 static bool ath10k_pci_is_awake(struct ath10k *ar)
334 {
335 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
336 	u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
337 			   RTC_STATE_ADDRESS);
338 
339 	return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
340 }
341 
342 static void __ath10k_pci_wake(struct ath10k *ar)
343 {
344 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
345 
346 	lockdep_assert_held(&ar_pci->ps_lock);
347 
348 	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
349 		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
350 
351 	iowrite32(PCIE_SOC_WAKE_V_MASK,
352 		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
353 		  PCIE_SOC_WAKE_ADDRESS);
354 }
355 
356 static void __ath10k_pci_sleep(struct ath10k *ar)
357 {
358 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
359 
360 	lockdep_assert_held(&ar_pci->ps_lock);
361 
362 	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
363 		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
364 
365 	iowrite32(PCIE_SOC_WAKE_RESET,
366 		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
367 		  PCIE_SOC_WAKE_ADDRESS);
368 	ar_pci->ps_awake = false;
369 }
370 
371 static int ath10k_pci_wake_wait(struct ath10k *ar)
372 {
373 	int tot_delay = 0;
374 	int curr_delay = 5;
375 
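	/* Poll the RTC state until the SoC reports it is fully awake. The poll
	 * interval backs off from 5us up to 50us per iteration so a fast
	 * wakeup is noticed quickly without hammering the bus for a slow one.
	 */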
376 	while (tot_delay < PCIE_WAKE_TIMEOUT) {
377 		if (ath10k_pci_is_awake(ar))
378 			return 0;
379 
380 		udelay(curr_delay);
381 		tot_delay += curr_delay;
382 
383 		if (curr_delay < 50)
384 			curr_delay += 5;
385 	}
386 
387 	return -ETIMEDOUT;
388 }
389 
390 static int ath10k_pci_wake(struct ath10k *ar)
391 {
392 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
393 	unsigned long flags;
394 	int ret = 0;
395 
396 	spin_lock_irqsave(&ar_pci->ps_lock, flags);
397 
398 	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
399 		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
400 
401 	/* This function can be called very frequently. To avoid excessive CPU
402 	 * stalls due to MMIO reads, the ps_awake variable caches the device state.
403 	 */
404 	if (!ar_pci->ps_awake) {
405 		__ath10k_pci_wake(ar);
406 
407 		ret = ath10k_pci_wake_wait(ar);
408 		if (ret == 0)
409 			ar_pci->ps_awake = true;
410 	}
411 
412 	if (ret == 0) {
413 		ar_pci->ps_wake_refcount++;
414 		WARN_ON(ar_pci->ps_wake_refcount == 0);
415 	}
416 
417 	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
418 
419 	return ret;
420 }
421 
422 static void ath10k_pci_sleep(struct ath10k *ar)
423 {
424 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
425 	unsigned long flags;
426 
427 	spin_lock_irqsave(&ar_pci->ps_lock, flags);
428 
429 	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
430 		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
431 
432 	if (WARN_ON(ar_pci->ps_wake_refcount == 0))
433 		goto skip;
434 
435 	ar_pci->ps_wake_refcount--;
436 
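	/* Don't put the chip to sleep right away; arm a grace-period timer so
	 * bursts of register accesses don't bounce the SoC power state. The
	 * timer callback (ath10k_pci_ps_timer) only sleeps the chip if the
	 * wake refcount is still zero when it fires.
	 */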
437 	mod_timer(&ar_pci->ps_timer, jiffies +
438 		  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));
439 
440 skip:
441 	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
442 }
443 
444 static void ath10k_pci_ps_timer(unsigned long ptr)
445 {
446 	struct ath10k *ar = (void *)ptr;
447 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
448 	unsigned long flags;
449 
450 	spin_lock_irqsave(&ar_pci->ps_lock, flags);
451 
452 	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
453 		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
454 
455 	if (ar_pci->ps_wake_refcount > 0)
456 		goto skip;
457 
458 	__ath10k_pci_sleep(ar);
459 
460 skip:
461 	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
462 }
463 
464 static void ath10k_pci_sleep_sync(struct ath10k *ar)
465 {
466 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
467 	unsigned long flags;
468 
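	/* Cancel any pending grace-period timer and force the chip to sleep
	 * immediately; callers are expected to have dropped all wake
	 * references by this point (hence the WARN_ON below).
	 */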
469 	del_timer_sync(&ar_pci->ps_timer);
470 
471 	spin_lock_irqsave(&ar_pci->ps_lock, flags);
472 	WARN_ON(ar_pci->ps_wake_refcount > 0);
473 	__ath10k_pci_sleep(ar);
474 	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
475 }
476 
477 void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
478 {
479 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
480 	int ret;
481 
482 	ret = ath10k_pci_wake(ar);
483 	if (ret) {
484 		ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
485 			    value, offset, ret);
486 		return;
487 	}
488 
489 	iowrite32(value, ar_pci->mem + offset);
490 	ath10k_pci_sleep(ar);
491 }
492 
493 u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
494 {
495 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
496 	u32 val;
497 	int ret;
498 
499 	ret = ath10k_pci_wake(ar);
500 	if (ret) {
501 		ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
502 			    offset, ret);
503 		return 0xffffffff;
504 	}
505 
506 	val = ioread32(ar_pci->mem + offset);
507 	ath10k_pci_sleep(ar);
508 
509 	return val;
510 }
511 
512 u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
513 {
514 	return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
515 }
516 
517 void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
518 {
519 	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
520 }
521 
522 u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
523 {
524 	return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
525 }
526 
527 void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
528 {
529 	ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
530 }
531 
532 static bool ath10k_pci_irq_pending(struct ath10k *ar)
533 {
534 	u32 cause;
535 
536 	/* Check if the shared legacy irq is for us */
537 	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
538 				  PCIE_INTR_CAUSE_ADDRESS);
539 	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
540 		return true;
541 
542 	return false;
543 }
544 
545 static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
546 {
547 	/* IMPORTANT: the INTR_CLR register has to be set after
548 	 * INTR_ENABLE is set to 0, otherwise the interrupt cannot be
549 	 * properly cleared. */
550 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
551 			   0);
552 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
553 			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
554 
555 	/* IMPORTANT: this extra read transaction is required to
556 	 * flush the posted write buffer. */
557 	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
558 				PCIE_INTR_ENABLE_ADDRESS);
559 }
560 
561 static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
562 {
563 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
564 			   PCIE_INTR_ENABLE_ADDRESS,
565 			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
566 
567 	/* IMPORTANT: this extra read transaction is required to
568 	 * flush the posted write buffer. */
569 	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
570 				PCIE_INTR_ENABLE_ADDRESS);
571 }
572 
573 static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
574 {
575 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
576 
577 	if (ar_pci->num_msi_intrs > 1)
578 		return "msi-x";
579 
580 	if (ar_pci->num_msi_intrs == 1)
581 		return "msi";
582 
583 	return "legacy";
584 }
585 
586 static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
587 {
588 	struct ath10k *ar = pipe->hif_ce_state;
589 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
590 	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
591 	struct sk_buff *skb;
592 	dma_addr_t paddr;
593 	int ret;
594 
595 	lockdep_assert_held(&ar_pci->ce_lock);
596 
597 	skb = dev_alloc_skb(pipe->buf_sz);
598 	if (!skb)
599 		return -ENOMEM;
600 
601 	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
602 
603 	paddr = dma_map_single(ar->dev, skb->data,
604 			       skb->len + skb_tailroom(skb),
605 			       DMA_FROM_DEVICE);
606 	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
607 		ath10k_warn(ar, "failed to dma map pci rx buf\n");
608 		dev_kfree_skb_any(skb);
609 		return -EIO;
610 	}
611 
612 	ATH10K_SKB_RXCB(skb)->paddr = paddr;
613 
614 	ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
615 	if (ret) {
616 		ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
617 		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
618 				 DMA_FROM_DEVICE);
619 		dev_kfree_skb_any(skb);
620 		return ret;
621 	}
622 
623 	return 0;
624 }
625 
626 static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
627 {
628 	struct ath10k *ar = pipe->hif_ce_state;
629 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
630 	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
631 	int ret, num;
632 
633 	lockdep_assert_held(&ar_pci->ce_lock);
634 
635 	if (pipe->buf_sz == 0)
636 		return;
637 
638 	if (!ce_pipe->dest_ring)
639 		return;
640 
641 	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
642 	while (num--) {
643 		ret = __ath10k_pci_rx_post_buf(pipe);
644 		if (ret) {
645 			ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
646 			mod_timer(&ar_pci->rx_post_retry, jiffies +
647 				  ATH10K_PCI_RX_POST_RETRY_MS);
648 			break;
649 		}
650 	}
651 }
652 
653 static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
654 {
655 	struct ath10k *ar = pipe->hif_ce_state;
656 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
657 
658 	spin_lock_bh(&ar_pci->ce_lock);
659 	__ath10k_pci_rx_post_pipe(pipe);
660 	spin_unlock_bh(&ar_pci->ce_lock);
661 }
662 
663 static void ath10k_pci_rx_post(struct ath10k *ar)
664 {
665 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
666 	int i;
667 
668 	spin_lock_bh(&ar_pci->ce_lock);
669 	for (i = 0; i < CE_COUNT; i++)
670 		__ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
671 	spin_unlock_bh(&ar_pci->ce_lock);
672 }
673 
674 static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
675 {
676 	struct ath10k *ar = (void *)ptr;
677 
678 	ath10k_pci_rx_post(ar);
679 }
680 
681 /*
682  * Diagnostic read/write access is provided for startup/config/debug usage.
683  * Caller must guarantee proper alignment, when applicable, and that there
684  * is only a single user at any moment.
685  */
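/* Both helpers below bounce the data through a DMA-coherent buffer and move it
 * in DIAG_TRANSFER_LIMIT-sized chunks over the dedicated diagnostic copy
 * engine (CE7), all under ce_lock.
 */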
686 static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
687 				    int nbytes)
688 {
689 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
690 	int ret = 0;
691 	u32 buf;
692 	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
693 	unsigned int id;
694 	unsigned int flags;
695 	struct ath10k_ce_pipe *ce_diag;
696 	/* Host buffer address in CE space */
697 	u32 ce_data;
698 	dma_addr_t ce_data_base = 0;
699 	void *data_buf = NULL;
700 	int i;
701 
702 	spin_lock_bh(&ar_pci->ce_lock);
703 
704 	ce_diag = ar_pci->ce_diag;
705 
706 	/*
707 	 * Allocate a temporary bounce buffer to hold caller's data
708 	 * to be DMA'ed from Target. This guarantees
709 	 *   1) 4-byte alignment
710 	 *   2) Buffer in DMA-able space
711 	 */
712 	orig_nbytes = nbytes;
713 	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
714 						       orig_nbytes,
715 						       &ce_data_base,
716 						       GFP_ATOMIC);
717 
718 	if (!data_buf) {
719 		ret = -ENOMEM;
720 		goto done;
721 	}
722 	memset(data_buf, 0, orig_nbytes);
723 
724 	remaining_bytes = orig_nbytes;
725 	ce_data = ce_data_base;
726 	while (remaining_bytes) {
727 		nbytes = min_t(unsigned int, remaining_bytes,
728 			       DIAG_TRANSFER_LIMIT);
729 
730 		ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
731 		if (ret != 0)
732 			goto done;
733 
734 		/* Request CE to send from Target(!) address to Host buffer */
735 		/*
736 		 * The address supplied by the caller is in the
737 		 * Target CPU virtual address space.
738 		 *
739 		 * In order to use this address with the diagnostic CE,
740 		 * convert it from Target CPU virtual address space
741 		 * to CE address space
742 		 */
743 		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
744 						     address);
745 
746 		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
747 					    0);
748 		if (ret)
749 			goto done;
750 
751 		i = 0;
752 		while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
753 							    &completed_nbytes,
754 							    &id) != 0) {
755 			mdelay(1);
756 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
757 				ret = -EBUSY;
758 				goto done;
759 			}
760 		}
761 
762 		if (nbytes != completed_nbytes) {
763 			ret = -EIO;
764 			goto done;
765 		}
766 
767 		if (buf != (u32)address) {
768 			ret = -EIO;
769 			goto done;
770 		}
771 
772 		i = 0;
773 		while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
774 							    &completed_nbytes,
775 							    &id, &flags) != 0) {
776 			mdelay(1);
777 
778 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
779 				ret = -EBUSY;
780 				goto done;
781 			}
782 		}
783 
784 		if (nbytes != completed_nbytes) {
785 			ret = -EIO;
786 			goto done;
787 		}
788 
789 		if (buf != ce_data) {
790 			ret = -EIO;
791 			goto done;
792 		}
793 
794 		remaining_bytes -= nbytes;
795 		address += nbytes;
796 		ce_data += nbytes;
797 	}
798 
799 done:
800 	if (ret == 0)
801 		memcpy(data, data_buf, orig_nbytes);
802 	else
803 		ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
804 			    address, ret);
805 
806 	if (data_buf)
807 		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
808 				  ce_data_base);
809 
810 	spin_unlock_bh(&ar_pci->ce_lock);
811 
812 	return ret;
813 }
814 
815 static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
816 {
817 	__le32 val = 0;
818 	int ret;
819 
820 	ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
821 	*value = __le32_to_cpu(val);
822 
823 	return ret;
824 }
825 
826 static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
827 				     u32 src, u32 len)
828 {
829 	u32 host_addr, addr;
830 	int ret;
831 
832 	host_addr = host_interest_item_address(src);
833 
834 	ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
835 	if (ret != 0) {
836 		ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
837 			    src, ret);
838 		return ret;
839 	}
840 
841 	ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
842 	if (ret != 0) {
843 		ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
844 			    addr, len, ret);
845 		return ret;
846 	}
847 
848 	return 0;
849 }
850 
851 #define ath10k_pci_diag_read_hi(ar, dest, src, len)		\
852 	__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
853 
854 static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
855 				     const void *data, int nbytes)
856 {
857 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
858 	int ret = 0;
859 	u32 buf;
860 	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
861 	unsigned int id;
862 	unsigned int flags;
863 	struct ath10k_ce_pipe *ce_diag;
864 	void *data_buf = NULL;
865 	u32 ce_data;	/* Host buffer address in CE space */
866 	dma_addr_t ce_data_base = 0;
867 	int i;
868 
869 	spin_lock_bh(&ar_pci->ce_lock);
870 
871 	ce_diag = ar_pci->ce_diag;
872 
873 	/*
874 	 * Allocate a temporary bounce buffer to hold caller's data
875 	 * to be DMA'ed to Target. This guarantees
876 	 *   1) 4-byte alignment
877 	 *   2) Buffer in DMA-able space
878 	 */
879 	orig_nbytes = nbytes;
880 	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
881 						       orig_nbytes,
882 						       &ce_data_base,
883 						       GFP_ATOMIC);
884 	if (!data_buf) {
885 		ret = -ENOMEM;
886 		goto done;
887 	}
888 
889 	/* Copy caller's data to allocated DMA buf */
890 	memcpy(data_buf, data, orig_nbytes);
891 
892 	/*
893 	 * The address supplied by the caller is in the
894 	 * Target CPU virtual address space.
895 	 *
896 	 * In order to use this address with the diagnostic CE,
897 	 * convert it from
898 	 *    Target CPU virtual address space
899 	 * to
900 	 *    CE address space
901 	 */
902 	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
903 
904 	remaining_bytes = orig_nbytes;
905 	ce_data = ce_data_base;
906 	while (remaining_bytes) {
907 		/* FIXME: check cast */
908 		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
909 
910 		/* Set up to receive directly into Target(!) address */
911 		ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
912 		if (ret != 0)
913 			goto done;
914 
915 		/*
916 		 * Request CE to send caller-supplied data that
917 		 * was copied to bounce buffer to Target(!) address.
918 		 */
919 		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
920 					    nbytes, 0, 0);
921 		if (ret != 0)
922 			goto done;
923 
924 		i = 0;
925 		while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
926 							    &completed_nbytes,
927 							    &id) != 0) {
928 			mdelay(1);
929 
930 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
931 				ret = -EBUSY;
932 				goto done;
933 			}
934 		}
935 
936 		if (nbytes != completed_nbytes) {
937 			ret = -EIO;
938 			goto done;
939 		}
940 
941 		if (buf != ce_data) {
942 			ret = -EIO;
943 			goto done;
944 		}
945 
946 		i = 0;
947 		while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
948 							    &completed_nbytes,
949 							    &id, &flags) != 0) {
950 			mdelay(1);
951 
952 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
953 				ret = -EBUSY;
954 				goto done;
955 			}
956 		}
957 
958 		if (nbytes != completed_nbytes) {
959 			ret = -EIO;
960 			goto done;
961 		}
962 
963 		if (buf != address) {
964 			ret = -EIO;
965 			goto done;
966 		}
967 
968 		remaining_bytes -= nbytes;
969 		address += nbytes;
970 		ce_data += nbytes;
971 	}
972 
973 done:
974 	if (data_buf) {
975 		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
976 				  ce_data_base);
977 	}
978 
979 	if (ret != 0)
980 		ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
981 			    address, ret);
982 
983 	spin_unlock_bh(&ar_pci->ce_lock);
984 
985 	return ret;
986 }
987 
988 static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
989 {
990 	__le32 val = __cpu_to_le32(value);
991 
992 	return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
993 }
994 
995 /* Called by lower (CE) layer when a send to Target completes. */
996 static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
997 {
998 	struct ath10k *ar = ce_state->ar;
999 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1000 	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
1001 	struct sk_buff_head list;
1002 	struct sk_buff *skb;
1003 	u32 ce_data;
1004 	unsigned int nbytes;
1005 	unsigned int transfer_id;
1006 
1007 	__skb_queue_head_init(&list);
1008 	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
1009 					     &nbytes, &transfer_id) == 0) {
1010 		/* no need to call tx completion for NULL pointers */
1011 		if (skb == NULL)
1012 			continue;
1013 
1014 		__skb_queue_tail(&list, skb);
1015 	}
1016 
1017 	while ((skb = __skb_dequeue(&list)))
1018 		cb->tx_completion(ar, skb);
1019 }
1020 
1021 /* Called by lower (CE) layer when data is received from the Target. */
1022 static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
1023 {
1024 	struct ath10k *ar = ce_state->ar;
1025 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1026 	struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
1027 	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
1028 	struct sk_buff *skb;
1029 	struct sk_buff_head list;
1030 	void *transfer_context;
1031 	u32 ce_data;
1032 	unsigned int nbytes, max_nbytes;
1033 	unsigned int transfer_id;
1034 	unsigned int flags;
1035 
1036 	__skb_queue_head_init(&list);
1037 	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
1038 					     &ce_data, &nbytes, &transfer_id,
1039 					     &flags) == 0) {
1040 		skb = transfer_context;
1041 		max_nbytes = skb->len + skb_tailroom(skb);
1042 		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1043 				 max_nbytes, DMA_FROM_DEVICE);
1044 
1045 		if (unlikely(max_nbytes < nbytes)) {
1046 			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
1047 				    nbytes, max_nbytes);
1048 			dev_kfree_skb_any(skb);
1049 			continue;
1050 		}
1051 
1052 		skb_put(skb, nbytes);
1053 		__skb_queue_tail(&list, skb);
1054 	}
1055 
1056 	while ((skb = __skb_dequeue(&list))) {
1057 		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
1058 			   ce_state->id, skb->len);
1059 		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
1060 				skb->data, skb->len);
1061 
1062 		cb->rx_completion(ar, skb);
1063 	}
1064 
1065 	ath10k_pci_rx_post_pipe(pipe_info);
1066 }
1067 
1068 static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
1069 				struct ath10k_hif_sg_item *items, int n_items)
1070 {
1071 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1072 	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
1073 	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
1074 	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
1075 	unsigned int nentries_mask;
1076 	unsigned int sw_index;
1077 	unsigned int write_index;
1078 	int err, i = 0;
1079 
1080 	spin_lock_bh(&ar_pci->ce_lock);
1081 
1082 	nentries_mask = src_ring->nentries_mask;
1083 	sw_index = src_ring->sw_index;
1084 	write_index = src_ring->write_index;
1085 
1086 	if (unlikely(CE_RING_DELTA(nentries_mask,
1087 				   write_index, sw_index - 1) < n_items)) {
1088 		err = -ENOBUFS;
1089 		goto err;
1090 	}
1091 
1092 	for (i = 0; i < n_items - 1; i++) {
1093 		ath10k_dbg(ar, ATH10K_DBG_PCI,
1094 			   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
1095 			   i, items[i].paddr, items[i].len, n_items);
1096 		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
1097 				items[i].vaddr, items[i].len);
1098 
1099 		err = ath10k_ce_send_nolock(ce_pipe,
1100 					    items[i].transfer_context,
1101 					    items[i].paddr,
1102 					    items[i].len,
1103 					    items[i].transfer_id,
1104 					    CE_SEND_FLAG_GATHER);
1105 		if (err)
1106 			goto err;
1107 	}
1108 
1109 	/* `i` is equal to `n_items - 1` after the loop above */
1110 
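	/* The last item is sent without CE_SEND_FLAG_GATHER, which marks the
	 * end of the scatter list for the copy engine.
	 */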
1111 	ath10k_dbg(ar, ATH10K_DBG_PCI,
1112 		   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
1113 		   i, items[i].paddr, items[i].len, n_items);
1114 	ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
1115 			items[i].vaddr, items[i].len);
1116 
1117 	err = ath10k_ce_send_nolock(ce_pipe,
1118 				    items[i].transfer_context,
1119 				    items[i].paddr,
1120 				    items[i].len,
1121 				    items[i].transfer_id,
1122 				    0);
1123 	if (err)
1124 		goto err;
1125 
1126 	spin_unlock_bh(&ar_pci->ce_lock);
1127 	return 0;
1128 
1129 err:
1130 	for (; i > 0; i--)
1131 		__ath10k_ce_send_revert(ce_pipe);
1132 
1133 	spin_unlock_bh(&ar_pci->ce_lock);
1134 	return err;
1135 }
1136 
1137 static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
1138 				    size_t buf_len)
1139 {
1140 	return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
1141 }
1142 
1143 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
1144 {
1145 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1146 
1147 	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
1148 
1149 	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
1150 }
1151 
1152 static void ath10k_pci_dump_registers(struct ath10k *ar,
1153 				      struct ath10k_fw_crash_data *crash_data)
1154 {
1155 	__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
1156 	int i, ret;
1157 
1158 	lockdep_assert_held(&ar->data_lock);
1159 
1160 	ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
1161 				      hi_failure_state,
1162 				      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
1163 	if (ret) {
1164 		ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
1165 		return;
1166 	}
1167 
1168 	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
1169 
1170 	ath10k_err(ar, "firmware register dump:\n");
1171 	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
1172 		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
1173 			   i,
1174 			   __le32_to_cpu(reg_dump_values[i]),
1175 			   __le32_to_cpu(reg_dump_values[i + 1]),
1176 			   __le32_to_cpu(reg_dump_values[i + 2]),
1177 			   __le32_to_cpu(reg_dump_values[i + 3]));
1178 
1179 	if (!crash_data)
1180 		return;
1181 
1182 	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
1183 		crash_data->registers[i] = reg_dump_values[i];
1184 }
1185 
1186 static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
1187 {
1188 	struct ath10k_fw_crash_data *crash_data;
1189 	char uuid[50];
1190 
1191 	spin_lock_bh(&ar->data_lock);
1192 
1193 	ar->stats.fw_crash_counter++;
1194 
1195 	crash_data = ath10k_debug_get_new_fw_crash_data(ar);
1196 
1197 	if (crash_data)
1198 		scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
1199 	else
1200 		scnprintf(uuid, sizeof(uuid), "n/a");
1201 
1202 	ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
1203 	ath10k_print_driver_info(ar);
1204 	ath10k_pci_dump_registers(ar, crash_data);
1205 
1206 	spin_unlock_bh(&ar->data_lock);
1207 
1208 	queue_work(ar->workqueue, &ar->restart_work);
1209 }
1210 
1211 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
1212 					       int force)
1213 {
1214 	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
1215 
1216 	if (!force) {
1217 		int resources;
1218 		/*
1219 		 * Decide whether to actually poll for completions, or just
1220 		 * wait for a later chance.
1221 		 * If there seem to be plenty of resources left, then just wait
1222 		 * since checking involves reading a CE register, which is a
1223 		 * relatively expensive operation.
1224 		 */
1225 		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
1226 
1227 		/*
1228 		 * If at least 50% of the total resources are still available,
1229 		 * don't bother checking again yet.
1230 		 */
1231 		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
1232 			return;
1233 	}
1234 	ath10k_ce_per_engine_service(ar, pipe);
1235 }
1236 
1237 static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
1238 					 struct ath10k_hif_cb *callbacks)
1239 {
1240 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1241 
1242 	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n");
1243 
1244 	memcpy(&ar_pci->msg_callbacks_current, callbacks,
1245 	       sizeof(ar_pci->msg_callbacks_current));
1246 }
1247 
1248 static void ath10k_pci_kill_tasklet(struct ath10k *ar)
1249 {
1250 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1251 	int i;
1252 
1253 	tasklet_kill(&ar_pci->intr_tq);
1254 	tasklet_kill(&ar_pci->msi_fw_err);
1255 
1256 	for (i = 0; i < CE_COUNT; i++)
1257 		tasklet_kill(&ar_pci->pipe_info[i].intr);
1258 
1259 	del_timer_sync(&ar_pci->rx_post_retry);
1260 }
1261 
1262 static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
1263 					      u16 service_id, u8 *ul_pipe,
1264 					      u8 *dl_pipe, int *ul_is_polled,
1265 					      int *dl_is_polled)
1266 {
1267 	const struct service_to_pipe *entry;
1268 	bool ul_set = false, dl_set = false;
1269 	int i;
1270 
1271 	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
1272 
1273 	/* polling for received messages not supported */
1274 	*dl_is_polled = 0;
1275 
1276 	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
1277 		entry = &target_service_to_ce_map_wlan[i];
1278 
1279 		if (__le32_to_cpu(entry->service_id) != service_id)
1280 			continue;
1281 
1282 		switch (__le32_to_cpu(entry->pipedir)) {
1283 		case PIPEDIR_NONE:
1284 			break;
1285 		case PIPEDIR_IN:
1286 			WARN_ON(dl_set);
1287 			*dl_pipe = __le32_to_cpu(entry->pipenum);
1288 			dl_set = true;
1289 			break;
1290 		case PIPEDIR_OUT:
1291 			WARN_ON(ul_set);
1292 			*ul_pipe = __le32_to_cpu(entry->pipenum);
1293 			ul_set = true;
1294 			break;
1295 		case PIPEDIR_INOUT:
1296 			WARN_ON(dl_set);
1297 			WARN_ON(ul_set);
1298 			*dl_pipe = __le32_to_cpu(entry->pipenum);
1299 			*ul_pipe = __le32_to_cpu(entry->pipenum);
1300 			dl_set = true;
1301 			ul_set = true;
1302 			break;
1303 		}
1304 	}
1305 
1306 	if (WARN_ON(!ul_set || !dl_set))
1307 		return -ENOENT;
1308 
1309 	*ul_is_polled =
1310 		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
1311 
1312 	return 0;
1313 }
1314 
1315 static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1316 					    u8 *ul_pipe, u8 *dl_pipe)
1317 {
1318 	int ul_is_polled, dl_is_polled;
1319 
1320 	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
1321 
1322 	(void)ath10k_pci_hif_map_service_to_pipe(ar,
1323 						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1324 						 ul_pipe,
1325 						 dl_pipe,
1326 						 &ul_is_polled,
1327 						 &dl_is_polled);
1328 }
1329 
1330 static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
1331 {
1332 	u32 val;
1333 
1334 	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
1335 	val &= ~CORE_CTRL_PCIE_REG_31_MASK;
1336 
1337 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
1338 }
1339 
1340 static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
1341 {
1342 	u32 val;
1343 
1344 	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
1345 	val |= CORE_CTRL_PCIE_REG_31_MASK;
1346 
1347 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
1348 }
1349 
1350 static void ath10k_pci_irq_disable(struct ath10k *ar)
1351 {
1352 	ath10k_ce_disable_interrupts(ar);
1353 	ath10k_pci_disable_and_clear_legacy_irq(ar);
1354 	ath10k_pci_irq_msi_fw_mask(ar);
1355 }
1356 
1357 static void ath10k_pci_irq_sync(struct ath10k *ar)
1358 {
1359 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1360 	int i;
1361 
1362 	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
1363 		synchronize_irq(ar_pci->pdev->irq + i);
1364 }
1365 
1366 static void ath10k_pci_irq_enable(struct ath10k *ar)
1367 {
1368 	ath10k_ce_enable_interrupts(ar);
1369 	ath10k_pci_enable_legacy_irq(ar);
1370 	ath10k_pci_irq_msi_fw_unmask(ar);
1371 }
1372 
1373 static int ath10k_pci_hif_start(struct ath10k *ar)
1374 {
1375 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1376 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
1377 
1378 	ath10k_pci_irq_enable(ar);
1379 	ath10k_pci_rx_post(ar);
1380 
1381 	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
1382 				   ar_pci->link_ctl);
1383 
1384 	return 0;
1385 }
1386 
1387 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
1388 {
1389 	struct ath10k *ar;
1390 	struct ath10k_ce_pipe *ce_pipe;
1391 	struct ath10k_ce_ring *ce_ring;
1392 	struct sk_buff *skb;
1393 	int i;
1394 
1395 	ar = pci_pipe->hif_ce_state;
1396 	ce_pipe = pci_pipe->ce_hdl;
1397 	ce_ring = ce_pipe->dest_ring;
1398 
1399 	if (!ce_ring)
1400 		return;
1401 
1402 	if (!pci_pipe->buf_sz)
1403 		return;
1404 
1405 	for (i = 0; i < ce_ring->nentries; i++) {
1406 		skb = ce_ring->per_transfer_context[i];
1407 		if (!skb)
1408 			continue;
1409 
1410 		ce_ring->per_transfer_context[i] = NULL;
1411 
1412 		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1413 				 skb->len + skb_tailroom(skb),
1414 				 DMA_FROM_DEVICE);
1415 		dev_kfree_skb_any(skb);
1416 	}
1417 }
1418 
1419 static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
1420 {
1421 	struct ath10k *ar;
1422 	struct ath10k_pci *ar_pci;
1423 	struct ath10k_ce_pipe *ce_pipe;
1424 	struct ath10k_ce_ring *ce_ring;
1425 	struct ce_desc *ce_desc;
1426 	struct sk_buff *skb;
1427 	int i;
1428 
1429 	ar = pci_pipe->hif_ce_state;
1430 	ar_pci = ath10k_pci_priv(ar);
1431 	ce_pipe = pci_pipe->ce_hdl;
1432 	ce_ring = ce_pipe->src_ring;
1433 
1434 	if (!ce_ring)
1435 		return;
1436 
1437 	if (!pci_pipe->buf_sz)
1438 		return;
1439 
1440 	ce_desc = ce_ring->shadow_base;
1441 	if (WARN_ON(!ce_desc))
1442 		return;
1443 
1444 	for (i = 0; i < ce_ring->nentries; i++) {
1445 		skb = ce_ring->per_transfer_context[i];
1446 		if (!skb)
1447 			continue;
1448 
1449 		ce_ring->per_transfer_context[i] = NULL;
1450 
1451 		ar_pci->msg_callbacks_current.tx_completion(ar, skb);
1452 	}
1453 }
1454 
1455 /*
1456  * Cleanup residual buffers for device shutdown:
1457  *    buffers that were enqueued for receive
1458  *    buffers that were to be sent
1459  * Note: Buffers that had completed but which were
1460  * not yet processed are on a completion queue. They
1461  * are handled when the completion thread shuts down.
1462  */
1463 static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1464 {
1465 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1466 	int pipe_num;
1467 
1468 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1469 		struct ath10k_pci_pipe *pipe_info;
1470 
1471 		pipe_info = &ar_pci->pipe_info[pipe_num];
1472 		ath10k_pci_rx_pipe_cleanup(pipe_info);
1473 		ath10k_pci_tx_pipe_cleanup(pipe_info);
1474 	}
1475 }
1476 
1477 static void ath10k_pci_ce_deinit(struct ath10k *ar)
1478 {
1479 	int i;
1480 
1481 	for (i = 0; i < CE_COUNT; i++)
1482 		ath10k_ce_deinit_pipe(ar, i);
1483 }
1484 
1485 static void ath10k_pci_flush(struct ath10k *ar)
1486 {
1487 	ath10k_pci_kill_tasklet(ar);
1488 	ath10k_pci_buffer_cleanup(ar);
1489 }
1490 
1491 static void ath10k_pci_hif_stop(struct ath10k *ar)
1492 {
1493 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1494 	unsigned long flags;
1495 
1496 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
1497 
1498 	/* Most likely the device has the HTT Rx ring configured. The only way
1499 	 * to prevent the device from accessing (and possibly corrupting) host
1500 	 * memory is to reset the chip now.
1501 	 *
1502 	 * There's also no known way of masking MSI interrupts on the device.
1503 	 * For ranged MSI the CE-related interrupts can be masked. However,
1504 	 * regardless of how many MSI interrupts are assigned, the first one
1505 	 * is always used for firmware indications (crashes) and cannot be
1506 	 * masked. To prevent the device from asserting the interrupt, reset it
1507 	 * before proceeding with cleanup.
1508 	 */
1509 	ath10k_pci_warm_reset(ar);
1510 
1511 	ath10k_pci_irq_disable(ar);
1512 	ath10k_pci_irq_sync(ar);
1513 	ath10k_pci_flush(ar);
1514 
1515 	spin_lock_irqsave(&ar_pci->ps_lock, flags);
1516 	WARN_ON(ar_pci->ps_wake_refcount > 0);
1517 	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
1518 }
1519 
1520 static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1521 					   void *req, u32 req_len,
1522 					   void *resp, u32 *resp_len)
1523 {
1524 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1525 	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1526 	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1527 	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1528 	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
1529 	dma_addr_t req_paddr = 0;
1530 	dma_addr_t resp_paddr = 0;
1531 	struct bmi_xfer xfer = {};
1532 	void *treq, *tresp = NULL;
1533 	int ret = 0;
1534 
1535 	might_sleep();
1536 
1537 	if (resp && !resp_len)
1538 		return -EINVAL;
1539 
1540 	if (resp && resp_len && *resp_len == 0)
1541 		return -EINVAL;
1542 
1543 	treq = kmemdup(req, req_len, GFP_KERNEL);
1544 	if (!treq)
1545 		return -ENOMEM;
1546 
1547 	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1548 	ret = dma_mapping_error(ar->dev, req_paddr);
1549 	if (ret)
1550 		goto err_dma;
1551 
1552 	if (resp && resp_len) {
1553 		tresp = kzalloc(*resp_len, GFP_KERNEL);
1554 		if (!tresp) {
1555 			ret = -ENOMEM;
1556 			goto err_req;
1557 		}
1558 
1559 		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1560 					    DMA_FROM_DEVICE);
1561 		ret = dma_mapping_error(ar->dev, resp_paddr);
1562 		if (ret)
1563 			goto err_req;
1564 
1565 		xfer.wait_for_resp = true;
1566 		xfer.resp_len = 0;
1567 
1568 		ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
1569 	}
1570 
1571 	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1572 	if (ret)
1573 		goto err_resp;
1574 
1575 	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
1576 	if (ret) {
1577 		u32 unused_buffer;
1578 		unsigned int unused_nbytes;
1579 		unsigned int unused_id;
1580 
1581 		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1582 					   &unused_nbytes, &unused_id);
1583 	} else {
1584 		/* transfer completed before the timeout */
1585 		ret = 0;
1586 	}
1587 
1588 err_resp:
1589 	if (resp) {
1590 		u32 unused_buffer;
1591 
1592 		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1593 		dma_unmap_single(ar->dev, resp_paddr,
1594 				 *resp_len, DMA_FROM_DEVICE);
1595 	}
1596 err_req:
1597 	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1598 
1599 	if (ret == 0 && resp_len) {
1600 		*resp_len = min(*resp_len, xfer.resp_len);
1601 		memcpy(resp, tresp, xfer.resp_len);
1602 	}
1603 err_dma:
1604 	kfree(treq);
1605 	kfree(tresp);
1606 
1607 	return ret;
1608 }
1609 
1610 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
1611 {
1612 	struct bmi_xfer *xfer;
1613 	u32 ce_data;
1614 	unsigned int nbytes;
1615 	unsigned int transfer_id;
1616 
1617 	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1618 					  &nbytes, &transfer_id))
1619 		return;
1620 
1621 	xfer->tx_done = true;
1622 }
1623 
1624 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1625 {
1626 	struct ath10k *ar = ce_state->ar;
1627 	struct bmi_xfer *xfer;
1628 	u32 ce_data;
1629 	unsigned int nbytes;
1630 	unsigned int transfer_id;
1631 	unsigned int flags;
1632 
1633 	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1634 					  &nbytes, &transfer_id, &flags))
1635 		return;
1636 
1637 	if (WARN_ON_ONCE(!xfer))
1638 		return;
1639 
1640 	if (!xfer->wait_for_resp) {
1641 		ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
1642 		return;
1643 	}
1644 
1645 	xfer->resp_len = nbytes;
1646 	xfer->rx_done = true;
1647 }
1648 
1649 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1650 			       struct ath10k_ce_pipe *rx_pipe,
1651 			       struct bmi_xfer *xfer)
1652 {
1653 	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1654 
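	/* BMI exchanges take place before hif_start() enables the CE
	 * interrupts, so completions are busy-polled here, yielding the CPU
	 * via schedule() between iterations.
	 */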
1655 	while (time_before_eq(jiffies, timeout)) {
1656 		ath10k_pci_bmi_send_done(tx_pipe);
1657 		ath10k_pci_bmi_recv_data(rx_pipe);
1658 
1659 		if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
1660 			return 0;
1661 
1662 		schedule();
1663 	}
1664 
1665 	return -ETIMEDOUT;
1666 }
1667 
1668 /*
1669  * Send an interrupt to the device to wake up the Target CPU
1670  * so it has an opportunity to notice any changed state.
1671  */
1672 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1673 {
1674 	u32 addr, val;
1675 
1676 	addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
1677 	val = ath10k_pci_read32(ar, addr);
1678 	val |= CORE_CTRL_CPU_INTR_MASK;
1679 	ath10k_pci_write32(ar, addr, val);
1680 
1681 	return 0;
1682 }
1683 
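/* Returns how many IRAM banks the target borrows for early allocation; the
 * value is revision dependent and is written into hi_early_alloc by
 * ath10k_pci_init_config().
 */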
1684 static int ath10k_pci_get_num_banks(struct ath10k *ar)
1685 {
1686 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1687 
1688 	switch (ar_pci->pdev->device) {
1689 	case QCA988X_2_0_DEVICE_ID:
1690 		return 1;
1691 	case QCA6174_2_1_DEVICE_ID:
1692 		switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
1693 		case QCA6174_HW_1_0_CHIP_ID_REV:
1694 		case QCA6174_HW_1_1_CHIP_ID_REV:
1695 		case QCA6174_HW_2_1_CHIP_ID_REV:
1696 		case QCA6174_HW_2_2_CHIP_ID_REV:
1697 			return 3;
1698 		case QCA6174_HW_1_3_CHIP_ID_REV:
1699 			return 2;
1700 		case QCA6174_HW_3_0_CHIP_ID_REV:
1701 		case QCA6174_HW_3_1_CHIP_ID_REV:
1702 		case QCA6174_HW_3_2_CHIP_ID_REV:
1703 			return 9;
1704 		}
1705 		break;
1706 	}
1707 
1708 	ath10k_warn(ar, "unknown number of banks, assuming 1\n");
1709 	return 1;
1710 }
1711 
1712 static int ath10k_pci_init_config(struct ath10k *ar)
1713 {
1714 	u32 interconnect_targ_addr;
1715 	u32 pcie_state_targ_addr = 0;
1716 	u32 pipe_cfg_targ_addr = 0;
1717 	u32 svc_to_pipe_map = 0;
1718 	u32 pcie_config_flags = 0;
1719 	u32 ealloc_value;
1720 	u32 ealloc_targ_addr;
1721 	u32 flag2_value;
1722 	u32 flag2_targ_addr;
1723 	int ret = 0;
1724 
1725 	/* Download to Target the CE Config and the service-to-CE map */
1726 	interconnect_targ_addr =
1727 		host_interest_item_address(HI_ITEM(hi_interconnect_state));
1728 
1729 	/* Supply Target-side CE configuration */
1730 	ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
1731 				     &pcie_state_targ_addr);
1732 	if (ret != 0) {
1733 		ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
1734 		return ret;
1735 	}
1736 
1737 	if (pcie_state_targ_addr == 0) {
1738 		ret = -EIO;
1739 		ath10k_err(ar, "Invalid pcie state addr\n");
1740 		return ret;
1741 	}
1742 
1743 	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
1744 					  offsetof(struct pcie_state,
1745 						   pipe_cfg_addr)),
1746 				     &pipe_cfg_targ_addr);
1747 	if (ret != 0) {
1748 		ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
1749 		return ret;
1750 	}
1751 
1752 	if (pipe_cfg_targ_addr == 0) {
1753 		ret = -EIO;
1754 		ath10k_err(ar, "Invalid pipe cfg addr\n");
1755 		return ret;
1756 	}
1757 
1758 	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1759 					target_ce_config_wlan,
1760 					sizeof(target_ce_config_wlan));
1761 
1762 	if (ret != 0) {
1763 		ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
1764 		return ret;
1765 	}
1766 
1767 	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
1768 					  offsetof(struct pcie_state,
1769 						   svc_to_pipe_map)),
1770 				     &svc_to_pipe_map);
1771 	if (ret != 0) {
1772 		ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
1773 		return ret;
1774 	}
1775 
1776 	if (svc_to_pipe_map == 0) {
1777 		ret = -EIO;
1778 		ath10k_err(ar, "Invalid svc_to_pipe map\n");
1779 		return ret;
1780 	}
1781 
1782 	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1783 					target_service_to_ce_map_wlan,
1784 					sizeof(target_service_to_ce_map_wlan));
1785 	if (ret != 0) {
1786 		ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
1787 		return ret;
1788 	}
1789 
1790 	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
1791 					  offsetof(struct pcie_state,
1792 						   config_flags)),
1793 				     &pcie_config_flags);
1794 	if (ret != 0) {
1795 		ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
1796 		return ret;
1797 	}
1798 
1799 	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1800 
1801 	ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
1802 					   offsetof(struct pcie_state,
1803 						    config_flags)),
1804 				      pcie_config_flags);
1805 	if (ret != 0) {
1806 		ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
1807 		return ret;
1808 	}
1809 
1810 	/* configure early allocation */
1811 	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1812 
1813 	ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
1814 	if (ret != 0) {
1815 		ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
1816 		return ret;
1817 	}
1818 
1819 	/* first bank is switched to IRAM */
1820 	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1821 			 HI_EARLY_ALLOC_MAGIC_MASK);
1822 	ealloc_value |= ((ath10k_pci_get_num_banks(ar) <<
1823 			  HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1824 			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1825 
1826 	ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
1827 	if (ret != 0) {
1828 		ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
1829 		return ret;
1830 	}
1831 
1832 	/* Tell Target to proceed with initialization */
1833 	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1834 
1835 	ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
1836 	if (ret != 0) {
1837 		ath10k_err(ar, "Failed to get option val: %d\n", ret);
1838 		return ret;
1839 	}
1840 
1841 	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1842 
1843 	ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
1844 	if (ret != 0) {
1845 		ath10k_err(ar, "Failed to set option val: %d\n", ret);
1846 		return ret;
1847 	}
1848 
1849 	return 0;
1850 }
1851 
1852 static int ath10k_pci_alloc_pipes(struct ath10k *ar)
1853 {
1854 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1855 	struct ath10k_pci_pipe *pipe;
1856 	int i, ret;
1857 
1858 	for (i = 0; i < CE_COUNT; i++) {
1859 		pipe = &ar_pci->pipe_info[i];
1860 		pipe->ce_hdl = &ar_pci->ce_states[i];
1861 		pipe->pipe_num = i;
1862 		pipe->hif_ce_state = ar;
1863 
1864 		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i],
1865 					   ath10k_pci_ce_send_done,
1866 					   ath10k_pci_ce_recv_data);
1867 		if (ret) {
1868 			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
1869 				   i, ret);
1870 			return ret;
1871 		}
1872 
1873 		/* Last CE is Diagnostic Window */
1874 		if (i == CE_COUNT - 1) {
1875 			ar_pci->ce_diag = pipe->ce_hdl;
1876 			continue;
1877 		}
1878 
1879 		pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
1880 	}
1881 
1882 	return 0;
1883 }
1884 
1885 static void ath10k_pci_free_pipes(struct ath10k *ar)
1886 {
1887 	int i;
1888 
1889 	for (i = 0; i < CE_COUNT; i++)
1890 		ath10k_ce_free_pipe(ar, i);
1891 }
1892 
1893 static int ath10k_pci_init_pipes(struct ath10k *ar)
1894 {
1895 	int i, ret;
1896 
1897 	for (i = 0; i < CE_COUNT; i++) {
1898 		ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
1899 		if (ret) {
1900 			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
1901 				   i, ret);
1902 			return ret;
1903 		}
1904 	}
1905 
1906 	return 0;
1907 }
1908 
1909 static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
1910 {
1911 	return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
1912 	       FW_IND_EVENT_PENDING;
1913 }
1914 
1915 static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
1916 {
1917 	u32 val;
1918 
1919 	val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
1920 	val &= ~FW_IND_EVENT_PENDING;
1921 	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
1922 }
1923 
1924 /* this function effectively clears the target memory controller assert line */
1925 static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
1926 {
1927 	u32 val;
1928 
1929 	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1930 	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1931 			       val | SOC_RESET_CONTROL_SI0_RST_MASK);
1932 	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1933 
1934 	msleep(10);
1935 
1936 	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1937 	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1938 			       val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
1939 	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1940 
1941 	msleep(10);
1942 }
1943 
1944 static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
1945 {
1946 	u32 val;
1947 
1948 	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
1949 
1950 	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1951 				SOC_RESET_CONTROL_ADDRESS);
1952 	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1953 			   val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
1954 }
1955 
1956 static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
1957 {
1958 	u32 val;
1959 
1960 	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1961 				SOC_RESET_CONTROL_ADDRESS);
1962 
1963 	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1964 			   val | SOC_RESET_CONTROL_CE_RST_MASK);
1965 	msleep(10);
1966 	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1967 			   val & ~SOC_RESET_CONTROL_CE_RST_MASK);
1968 }
1969 
1970 static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
1971 {
1972 	u32 val;
1973 
1974 	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1975 				SOC_LF_TIMER_CONTROL0_ADDRESS);
1976 	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
1977 			   SOC_LF_TIMER_CONTROL0_ADDRESS,
1978 			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
1979 }
1980 
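/*
 * Warm reset sequence: quiesce interrupts, clear the SI0 assert, warm-reset
 * the target CPU, re-init the CE pipes and wait for the target, then clear
 * the LF timer, reset the CE block, warm-reset the CPU once more and wait
 * again for FW_IND_INITIALIZED.  Only the final wait is error-checked.
 */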
1981 static int ath10k_pci_warm_reset(struct ath10k *ar)
1982 {
1983 	int ret;
1984 
1985 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
1986 
1987 	spin_lock_bh(&ar->data_lock);
1988 	ar->stats.fw_warm_reset_counter++;
1989 	spin_unlock_bh(&ar->data_lock);
1990 
1991 	ath10k_pci_irq_disable(ar);
1992 
1993 	/* Make sure the target CPU is not doing anything dangerous, e.g. if it
1994 	 * were to access the copy engine while the host performs a copy engine
1995 	 * reset, the device could confuse the PCIe controller to the point of
1996 	 * bringing the host system to a complete stop (i.e. hang).
1997 	 */
1998 	ath10k_pci_warm_reset_si0(ar);
1999 	ath10k_pci_warm_reset_cpu(ar);
2000 	ath10k_pci_init_pipes(ar);
2001 	ath10k_pci_wait_for_target_init(ar);
2002 
2003 	ath10k_pci_warm_reset_clear_lf(ar);
2004 	ath10k_pci_warm_reset_ce(ar);
2005 	ath10k_pci_warm_reset_cpu(ar);
2006 	ath10k_pci_init_pipes(ar);
2007 
2008 	ret = ath10k_pci_wait_for_target_init(ar);
2009 	if (ret) {
2010 		ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
2011 		return ret;
2012 	}
2013 
2014 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
2015 
2016 	return 0;
2017 }
2018 
2019 static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
2020 {
2021 	int i, ret;
2022 	u32 val;
2023 
2024 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
2025 
2026 	/* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
2027 	 * It is thus preferred to use warm reset which is safer but may not be
2028 	 * able to recover the device from all possible fail scenarios.
2029 	 *
2030 	 * Warm reset doesn't always work on first try so attempt it a few
2031 	 * times before giving up.
2032 	 */
2033 	for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
2034 		ret = ath10k_pci_warm_reset(ar);
2035 		if (ret) {
2036 			ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
2037 				    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
2038 				    ret);
2039 			continue;
2040 		}
2041 
2042 		/* FIXME: Sometimes copy engine doesn't recover after warm
2043 		 * reset. In most cases this needs cold reset. In some of these
2044 		 * cases the device is in such a state that a cold reset may
2045 		 * lock up the host.
2046 		 *
2047 		 * Reading any host interest register via copy engine is
2048 		 * sufficient to verify if device is capable of booting
2049 		 * firmware blob.
2050 		 */
2051 		ret = ath10k_pci_init_pipes(ar);
2052 		if (ret) {
2053 			ath10k_warn(ar, "failed to init copy engine: %d\n",
2054 				    ret);
2055 			continue;
2056 		}
2057 
2058 		ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
2059 					     &val);
2060 		if (ret) {
2061 			ath10k_warn(ar, "failed to poke copy engine: %d\n",
2062 				    ret);
2063 			continue;
2064 		}
2065 
2066 		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
2067 		return 0;
2068 	}
2069 
2070 	if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
2071 		ath10k_warn(ar, "refusing cold reset as requested\n");
2072 		return -EPERM;
2073 	}
2074 
2075 	ret = ath10k_pci_cold_reset(ar);
2076 	if (ret) {
2077 		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2078 		return ret;
2079 	}
2080 
2081 	ret = ath10k_pci_wait_for_target_init(ar);
2082 	if (ret) {
2083 		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2084 			    ret);
2085 		return ret;
2086 	}
2087 
2088 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
2089 
2090 	return 0;
2091 }
2092 
2093 static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
2094 {
2095 	int ret;
2096 
2097 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
2098 
2099 	/* FIXME: QCA6174 requires cold + warm reset to work. */
2100 
2101 	ret = ath10k_pci_cold_reset(ar);
2102 	if (ret) {
2103 		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2104 		return ret;
2105 	}
2106 
2107 	ret = ath10k_pci_wait_for_target_init(ar);
2108 	if (ret) {
2109 		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2110 				ret);
2111 		return ret;
2112 	}
2113 
2114 	ret = ath10k_pci_warm_reset(ar);
2115 	if (ret) {
2116 		ath10k_warn(ar, "failed to warm reset: %d\n", ret);
2117 		return ret;
2118 	}
2119 
2120 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
2121 
2122 	return 0;
2123 }
2124 
2125 static int ath10k_pci_chip_reset(struct ath10k *ar)
2126 {
2127 	if (QCA_REV_988X(ar))
2128 		return ath10k_pci_qca988x_chip_reset(ar);
2129 	else if (QCA_REV_6174(ar))
2130 		return ath10k_pci_qca6174_chip_reset(ar);
2131 	else
2132 		return -ENOTSUPP;
2133 }
2134 
2135 static int ath10k_pci_hif_power_up(struct ath10k *ar)
2136 {
2137 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2138 	int ret;
2139 
2140 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
2141 
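	/* Save the PCIe link control word and disable ASPM (clear the
	 * PCI_EXP_LNKCTL_ASPMC bits) while the target is being brought up;
	 * the saved value in ar_pci->link_ctl lets it be restored once the
	 * device is up.
	 */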
2142 	pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2143 				  &ar_pci->link_ctl);
2144 	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2145 				   ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
2146 
2147 	/*
2148 	 * Bring the target up cleanly.
2149 	 *
2150 	 * The target may be in an undefined state with an AUX-powered Target
2151 	 * and a Host in WoW mode. If the Host crashes, loses power, or is
2152 	 * restarted (without unloading the driver) then the Target is left
2153 	 * (aux) powered and running. On a subsequent driver load, the Target
2154 	 * is in an unexpected state. We try to catch that here in order to
2155 	 * reset the Target and retry the probe.
2156 	 */
2157 	ret = ath10k_pci_chip_reset(ar);
2158 	if (ret) {
2159 		if (ath10k_pci_has_fw_crashed(ar)) {
2160 			ath10k_warn(ar, "firmware crashed during chip reset\n");
2161 			ath10k_pci_fw_crashed_clear(ar);
2162 			ath10k_pci_fw_crashed_dump(ar);
2163 		}
2164 
2165 		ath10k_err(ar, "failed to reset chip: %d\n", ret);
2166 		goto err_sleep;
2167 	}
2168 
2169 	ret = ath10k_pci_init_pipes(ar);
2170 	if (ret) {
2171 		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
2172 		goto err_sleep;
2173 	}
2174 
2175 	ret = ath10k_pci_init_config(ar);
2176 	if (ret) {
2177 		ath10k_err(ar, "failed to setup init config: %d\n", ret);
2178 		goto err_ce;
2179 	}
2180 
2181 	ret = ath10k_pci_wake_target_cpu(ar);
2182 	if (ret) {
2183 		ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
2184 		goto err_ce;
2185 	}
2186 
2187 	return 0;
2188 
2189 err_ce:
2190 	ath10k_pci_ce_deinit(ar);
2191 
2192 err_sleep:
2193 	return ret;
2194 }
2195 
2196 static void ath10k_pci_hif_power_down(struct ath10k *ar)
2197 {
2198 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
2199 
2200 	/* Currently hif_power_up effectively performs a reset and hif_stop
2201 	 * resets the chip as well, so there's no point in resetting here.
2202 	 */
2203 }
2204 
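/*
 * PM hooks.  Suspend only has to make sure the device is really asleep;
 * resume re-clears the RETRY_TIMEOUT byte that a config space reset brings
 * back, since pci_restore_state() does not cover that register.
 */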
2205 #ifdef CONFIG_PM
2206 
2207 static int ath10k_pci_hif_suspend(struct ath10k *ar)
2208 {
2209 	/* The grace timer can still be counting down and ar->ps_awake may be true.
2210 	 * It is known that the device may be asleep after resuming regardless
2211 	 * of the SoC powersave state before suspending. Hence make sure the
2212 	 * device is asleep before proceeding.
2213 	 */
2214 	ath10k_pci_sleep_sync(ar);
2215 
2216 	return 0;
2217 }
2218 
2219 static int ath10k_pci_hif_resume(struct ath10k *ar)
2220 {
2221 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2222 	struct pci_dev *pdev = ar_pci->pdev;
2223 	u32 val;
2224 
2225 	/* Suspend/Resume resets the PCI configuration space, so we have to
2226 	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
2227 	 * from interfering with C3 CPU state. pci_restore_state won't help
2228 	 * here since it only restores the first 64 bytes of the PCI config header.
2229 	 */
2230 	pci_read_config_dword(pdev, 0x40, &val);
2231 	if ((val & 0x0000ff00) != 0)
2232 		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2233 
2234 	return 0;
2235 }
2236 #endif
2237 
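/*
 * HIF (host interface) callbacks exported to the ath10k core.  The table is
 * passed to ath10k_core_create() in ath10k_pci_probe() below, so the upper
 * layers reach the PCI bus only through these hooks.  Roughly (a sketch of
 * how the core consumes the ops, not a call site in this file):
 *
 *	ar->hif.ops->power_up(ar);
 *	ar->hif.ops->write32(ar, addr, value);
 */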
2238 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2239 	.tx_sg			= ath10k_pci_hif_tx_sg,
2240 	.diag_read		= ath10k_pci_hif_diag_read,
2241 	.diag_write		= ath10k_pci_diag_write_mem,
2242 	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
2243 	.start			= ath10k_pci_hif_start,
2244 	.stop			= ath10k_pci_hif_stop,
2245 	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
2246 	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
2247 	.send_complete_check	= ath10k_pci_hif_send_complete_check,
2248 	.set_callbacks		= ath10k_pci_hif_set_callbacks,
2249 	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
2250 	.power_up		= ath10k_pci_hif_power_up,
2251 	.power_down		= ath10k_pci_hif_power_down,
2252 	.read32			= ath10k_pci_read32,
2253 	.write32		= ath10k_pci_write32,
2254 #ifdef CONFIG_PM
2255 	.suspend		= ath10k_pci_hif_suspend,
2256 	.resume			= ath10k_pci_hif_resume,
2257 #endif
2258 };
2259 
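/*
 * Bottom halves: copy engine and firmware-error servicing runs in tasklet
 * (softirq) context; the hard-irq handlers further down only schedule these.
 */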
2260 static void ath10k_pci_ce_tasklet(unsigned long ptr)
2261 {
2262 	struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
2263 	struct ath10k_pci *ar_pci = pipe->ar_pci;
2264 
2265 	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2266 }
2267 
2268 static void ath10k_msi_err_tasklet(unsigned long data)
2269 {
2270 	struct ath10k *ar = (struct ath10k *)data;
2271 
2272 	if (!ath10k_pci_has_fw_crashed(ar)) {
2273 		ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
2274 		return;
2275 	}
2276 
2277 	ath10k_pci_irq_disable(ar);
2278 	ath10k_pci_fw_crashed_clear(ar);
2279 	ath10k_pci_fw_crashed_dump(ar);
2280 }
2281 
2282 /*
2283  * Handler for a per-engine interrupt on a PARTICULAR CE.
2284  * This is used in cases where each CE has a private MSI interrupt.
2285  */
2286 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2287 {
2288 	struct ath10k *ar = arg;
2289 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2290 	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2291 
2292 	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
2293 		ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
2294 			    ce_id);
2295 		return IRQ_HANDLED;
2296 	}
2297 
2298 	/*
2299 	 * NOTE: We are able to derive ce_id from irq because we
2300 	 * use a one-to-one mapping for CEs 0..5.
2301 	 * CEs 6 & 7 do not use interrupts at all.
2302 	 *
2303 	 * This mapping must be kept in sync with the mapping
2304 	 * used by firmware.
2305 	 */
2306 	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2307 	return IRQ_HANDLED;
2308 }
2309 
2310 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2311 {
2312 	struct ath10k *ar = arg;
2313 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2314 
2315 	tasklet_schedule(&ar_pci->msi_fw_err);
2316 	return IRQ_HANDLED;
2317 }
2318 
2319 /*
2320  * Top-level interrupt handler for all PCI interrupts from a Target.
2321  * When a block of MSI interrupts is allocated, this top-level handler
2322  * is not used; instead, we directly call the correct sub-handler.
2323  */
2324 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2325 {
2326 	struct ath10k *ar = arg;
2327 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2328 
2329 	if (ar_pci->num_msi_intrs == 0) {
2330 		if (!ath10k_pci_irq_pending(ar))
2331 			return IRQ_NONE;
2332 
2333 		ath10k_pci_disable_and_clear_legacy_irq(ar);
2334 	}
2335 
2336 	tasklet_schedule(&ar_pci->intr_tq);
2337 
2338 	return IRQ_HANDLED;
2339 }
2340 
2341 static void ath10k_pci_tasklet(unsigned long data)
2342 {
2343 	struct ath10k *ar = (struct ath10k *)data;
2344 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2345 
2346 	if (ath10k_pci_has_fw_crashed(ar)) {
2347 		ath10k_pci_irq_disable(ar);
2348 		ath10k_pci_fw_crashed_clear(ar);
2349 		ath10k_pci_fw_crashed_dump(ar);
2350 		return;
2351 	}
2352 
2353 	ath10k_ce_per_engine_service_any(ar);
2354 
2355 	/* Re-enable legacy irq that was disabled in the irq handler */
2356 	if (ar_pci->num_msi_intrs == 0)
2357 		ath10k_pci_enable_legacy_irq(ar);
2358 }
2359 
2360 static int ath10k_pci_request_irq_msix(struct ath10k *ar)
2361 {
2362 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2363 	int ret, i;
2364 
2365 	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2366 			  ath10k_pci_msi_fw_handler,
2367 			  IRQF_SHARED, "ath10k_pci", ar);
2368 	if (ret) {
2369 		ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
2370 			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2371 		return ret;
2372 	}
2373 
2374 	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2375 		ret = request_irq(ar_pci->pdev->irq + i,
2376 				  ath10k_pci_per_engine_handler,
2377 				  IRQF_SHARED, "ath10k_pci", ar);
2378 		if (ret) {
2379 			ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
2380 				    ar_pci->pdev->irq + i, ret);
2381 
2382 			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2383 				free_irq(ar_pci->pdev->irq + i, ar);
2384 
2385 			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2386 			return ret;
2387 		}
2388 	}
2389 
2390 	return 0;
2391 }
2392 
2393 static int ath10k_pci_request_irq_msi(struct ath10k *ar)
2394 {
2395 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2396 	int ret;
2397 
2398 	ret = request_irq(ar_pci->pdev->irq,
2399 			  ath10k_pci_interrupt_handler,
2400 			  IRQF_SHARED, "ath10k_pci", ar);
2401 	if (ret) {
2402 		ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
2403 			    ar_pci->pdev->irq, ret);
2404 		return ret;
2405 	}
2406 
2407 	return 0;
2408 }
2409 
2410 static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
2411 {
2412 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2413 	int ret;
2414 
2415 	ret = request_irq(ar_pci->pdev->irq,
2416 			  ath10k_pci_interrupt_handler,
2417 			  IRQF_SHARED, "ath10k_pci", ar);
2418 	if (ret) {
2419 		ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
2420 			    ar_pci->pdev->irq, ret);
2421 		return ret;
2422 	}
2423 
2424 	return 0;
2425 }
2426 
2427 static int ath10k_pci_request_irq(struct ath10k *ar)
2428 {
2429 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2430 
2431 	switch (ar_pci->num_msi_intrs) {
2432 	case 0:
2433 		return ath10k_pci_request_irq_legacy(ar);
2434 	case 1:
2435 		return ath10k_pci_request_irq_msi(ar);
2436 	case MSI_NUM_REQUEST:
2437 		return ath10k_pci_request_irq_msix(ar);
2438 	}
2439 
2440 	ath10k_warn(ar, "unknown irq configuration upon request\n");
2441 	return -EINVAL;
2442 }
2443 
2444 static void ath10k_pci_free_irq(struct ath10k *ar)
2445 {
2446 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2447 	int i;
2448 
2449 	/* There's at least one interrupt regardless of whether it's legacy INTR,
2450 	 * MSI or MSI-X. */
2451 	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2452 		free_irq(ar_pci->pdev->irq + i, ar);
2453 }
2454 
2455 static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2456 {
2457 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2458 	int i;
2459 
2460 	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2461 	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2462 		     (unsigned long)ar);
2463 
2464 	for (i = 0; i < CE_COUNT; i++) {
2465 		ar_pci->pipe_info[i].ar_pci = ar_pci;
2466 		tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2467 			     (unsigned long)&ar_pci->pipe_info[i]);
2468 	}
2469 }
2470 
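/*
 * Interrupt mode selection.  Unless ath10k_pci_irq_mode pins a specific mode,
 * try MSI-X (MSI_NUM_REQUEST vectors) first, then single MSI, then fall back
 * to legacy/shared interrupts.  The result is encoded in
 * ar_pci->num_msi_intrs (MSI_NUM_REQUEST, 1 or 0), which
 * ath10k_pci_request_irq() and ath10k_pci_deinit_irq() switch on.
 */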
2471 static int ath10k_pci_init_irq(struct ath10k *ar)
2472 {
2473 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2474 	int ret;
2475 
2476 	ath10k_pci_init_irq_tasklets(ar);
2477 
2478 	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
2479 		ath10k_info(ar, "limiting irq mode to: %d\n",
2480 			    ath10k_pci_irq_mode);
2481 
2482 	/* Try MSI-X */
2483 	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
2484 		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
2485 		ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
2486 					   ar_pci->num_msi_intrs);
2487 		if (ret > 0)
2488 			return 0;
2489 
2490 		/* fall-through */
2491 	}
2492 
2493 	/* Try MSI */
2494 	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2495 		ar_pci->num_msi_intrs = 1;
2496 		ret = pci_enable_msi(ar_pci->pdev);
2497 		if (ret == 0)
2498 			return 0;
2499 
2500 		/* fall-through */
2501 	}
2502 
2503 	/* Try legacy irq
2504 	 *
2505 	 * A potential race occurs here: The CORE_BASE write
2506 	 * depends on target correctly decoding AXI address but
2507 	 * host won't know when target writes BAR to CORE_CTRL.
2508 	 * This write might get lost if target has NOT written BAR.
2509 	 * For now, work around the race by repeating the write in the
2510 	 * target initialisation polling loop below. */
2511 	ar_pci->num_msi_intrs = 0;
2512 
2513 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2514 			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2515 
2516 	return 0;
2517 }
2518 
2519 static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2520 {
2521 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2522 			   0);
2523 }
2524 
2525 static int ath10k_pci_deinit_irq(struct ath10k *ar)
2526 {
2527 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2528 
2529 	switch (ar_pci->num_msi_intrs) {
2530 	case 0:
2531 		ath10k_pci_deinit_irq_legacy(ar);
2532 		return 0;
2533 	case 1:
2534 		/* fall-through */
2535 	case MSI_NUM_REQUEST:
2536 		pci_disable_msi(ar_pci->pdev);
2537 		return 0;
2538 	default:
2539 		pci_disable_msi(ar_pci->pdev);
2540 	}
2541 
2542 	ath10k_warn(ar, "unknown irq configuration upon deinit\n");
2543 	return -EINVAL;
2544 }
2545 
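/*
 * Poll the FW_INDICATOR scratch register for up to ATH10K_PCI_TARGET_WAIT ms.
 * 0xffffffff means the device fell off the bus, FW_IND_EVENT_PENDING means
 * the firmware crashed during init, FW_IND_INITIALIZED is the success case.
 * With legacy interrupts the interrupt enable write is repeated on every
 * iteration to paper over the race described in ath10k_pci_init_irq().
 */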
2546 static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2547 {
2548 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2549 	unsigned long timeout;
2550 	u32 val;
2551 
2552 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting for target to initialise\n");
2553 
2554 	timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2555 
2556 	do {
2557 		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2558 
2559 		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
2560 			   val);
2561 
2562 		/* target should never return this */
2563 		if (val == 0xffffffff)
2564 			continue;
2565 
2566 		/* the device has crashed so don't bother trying anymore */
2567 		if (val & FW_IND_EVENT_PENDING)
2568 			break;
2569 
2570 		if (val & FW_IND_INITIALIZED)
2571 			break;
2572 
2573 		if (ar_pci->num_msi_intrs == 0)
2574 			/* Fix potential race by repeating CORE_BASE writes */
2575 			ath10k_pci_enable_legacy_irq(ar);
2576 
2577 		mdelay(10);
2578 	} while (time_before(jiffies, timeout));
2579 
2580 	ath10k_pci_disable_and_clear_legacy_irq(ar);
2581 	ath10k_pci_irq_msi_fw_mask(ar);
2582 
2583 	if (val == 0xffffffff) {
2584 		ath10k_err(ar, "failed to read device register, device is gone\n");
2585 		return -EIO;
2586 	}
2587 
2588 	if (val & FW_IND_EVENT_PENDING) {
2589 		ath10k_warn(ar, "device has crashed during init\n");
2590 		return -ECOMM;
2591 	}
2592 
2593 	if (!(val & FW_IND_INITIALIZED)) {
2594 		ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
2595 			   val);
2596 		return -ETIMEDOUT;
2597 	}
2598 
2599 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
2600 	return 0;
2601 }
2602 
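/*
 * Cold reset: assert bit 0 of SOC_GLOBAL_RESET (which resets the whole
 * target including the PCIe core), wait for RTC_STATE to report cold reset,
 * then deassert and wait for the bit to clear again.  The register polls are
 * bounded but their timeouts are not treated as errors here.
 */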
2603 static int ath10k_pci_cold_reset(struct ath10k *ar)
2604 {
2605 	int i;
2606 	u32 val;
2607 
2608 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
2609 
2610 	spin_lock_bh(&ar->data_lock);
2611 
2612 	ar->stats.fw_cold_reset_counter++;
2613 
2614 	spin_unlock_bh(&ar->data_lock);
2615 
2616 	/* Put Target, including PCIe, into RESET. */
2617 	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2618 	val |= 1;
2619 	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2620 
2621 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2622 		if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2623 					  RTC_STATE_COLD_RESET_MASK)
2624 			break;
2625 		msleep(1);
2626 	}
2627 
2628 	/* Pull Target, including PCIe, out of RESET. */
2629 	val &= ~1;
2630 	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2631 
2632 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2633 		if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2634 					    RTC_STATE_COLD_RESET_MASK))
2635 			break;
2636 		msleep(1);
2637 	}
2638 
2639 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
2640 
2641 	return 0;
2642 }
2643 
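/*
 * Claim the PCI device: enable it, reserve BAR_NUM, force a 32-bit DMA mask
 * (the target expects 32-bit DMA), enable bus mastering and iomap the BAR so
 * the ath10k_pci_read32/write32 accessors have something to poke at.
 */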
2644 static int ath10k_pci_claim(struct ath10k *ar)
2645 {
2646 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2647 	struct pci_dev *pdev = ar_pci->pdev;
2648 	int ret;
2649 
2650 	pci_set_drvdata(pdev, ar);
2651 
2652 	ret = pci_enable_device(pdev);
2653 	if (ret) {
2654 		ath10k_err(ar, "failed to enable pci device: %d\n", ret);
2655 		return ret;
2656 	}
2657 
2658 	ret = pci_request_region(pdev, BAR_NUM, "ath");
2659 	if (ret) {
2660 		ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
2661 			   ret);
2662 		goto err_device;
2663 	}
2664 
2665 	/* Target expects 32 bit DMA. Enforce it. */
2666 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2667 	if (ret) {
2668 		ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
2669 		goto err_region;
2670 	}
2671 
2672 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2673 	if (ret) {
2674 		ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
2675 			   ret);
2676 		goto err_region;
2677 	}
2678 
2679 	pci_set_master(pdev);
2680 
2681 	/* Arrange for access to Target SoC registers. */
2682 	ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
2683 	if (!ar_pci->mem) {
2684 		ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
2685 		ret = -EIO;
2686 		goto err_master;
2687 	}
2688 
2689 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2690 	return 0;
2691 
2692 err_master:
2693 	pci_clear_master(pdev);
2694 
2695 err_region:
2696 	pci_release_region(pdev, BAR_NUM);
2697 
2698 err_device:
2699 	pci_disable_device(pdev);
2700 
2701 	return ret;
2702 }
2703 
2704 static void ath10k_pci_release(struct ath10k *ar)
2705 {
2706 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2707 	struct pci_dev *pdev = ar_pci->pdev;
2708 
2709 	pci_iounmap(pdev, ar_pci->mem);
2710 	pci_release_region(pdev, BAR_NUM);
2711 	pci_clear_master(pdev);
2712 	pci_disable_device(pdev);
2713 }
2714 
2715 static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
2716 {
2717 	const struct ath10k_pci_supp_chip *supp_chip;
2718 	int i;
2719 	u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
2720 
2721 	for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
2722 		supp_chip = &ath10k_pci_supp_chips[i];
2723 
2724 		if (supp_chip->dev_id == dev_id &&
2725 		    supp_chip->rev_id == rev_id)
2726 			return true;
2727 	}
2728 
2729 	return false;
2730 }
2731 
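/*
 * Probe: map the PCI device ID to a hardware revision, create the core
 * instance, claim the device, allocate CE pipes, set up and request
 * interrupts, reset the chip, validate the chip id against
 * ath10k_pci_supp_chips[] and finally register with the ath10k core.
 * Each error label unwinds exactly the steps completed before it.
 */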
2732 static int ath10k_pci_probe(struct pci_dev *pdev,
2733 			    const struct pci_device_id *pci_dev)
2734 {
2735 	int ret = 0;
2736 	struct ath10k *ar;
2737 	struct ath10k_pci *ar_pci;
2738 	enum ath10k_hw_rev hw_rev;
2739 	u32 chip_id;
2740 
2741 	switch (pci_dev->device) {
2742 	case QCA988X_2_0_DEVICE_ID:
2743 		hw_rev = ATH10K_HW_QCA988X;
2744 		break;
2745 	case QCA6174_2_1_DEVICE_ID:
2746 		hw_rev = ATH10K_HW_QCA6174;
2747 		break;
2748 	default:
2749 		WARN_ON(1);
2750 		return -ENOTSUPP;
2751 	}
2752 
2753 	ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
2754 				hw_rev, &ath10k_pci_hif_ops);
2755 	if (!ar) {
2756 		dev_err(&pdev->dev, "failed to allocate core\n");
2757 		return -ENOMEM;
2758 	}
2759 
2760 	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n");
2761 
2762 	ar_pci = ath10k_pci_priv(ar);
2763 	ar_pci->pdev = pdev;
2764 	ar_pci->dev = &pdev->dev;
2765 	ar_pci->ar = ar;
2766 
2767 	if (pdev->subsystem_vendor || pdev->subsystem_device)
2768 		scnprintf(ar->spec_board_id, sizeof(ar->spec_board_id),
2769 			  "%04x:%04x:%04x:%04x",
2770 			  pdev->vendor, pdev->device,
2771 			  pdev->subsystem_vendor, pdev->subsystem_device);
2772 
2773 	spin_lock_init(&ar_pci->ce_lock);
2774 	spin_lock_init(&ar_pci->ps_lock);
2775 
2776 	setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
2777 		    (unsigned long)ar);
2778 	setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer,
2779 		    (unsigned long)ar);
2780 
2781 	ret = ath10k_pci_claim(ar);
2782 	if (ret) {
2783 		ath10k_err(ar, "failed to claim device: %d\n", ret);
2784 		goto err_core_destroy;
2785 	}
2786 
2787 	ret = ath10k_pci_alloc_pipes(ar);
2788 	if (ret) {
2789 		ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
2790 			   ret);
2791 		goto err_sleep;
2792 	}
2793 
2794 	ath10k_pci_ce_deinit(ar);
2795 	ath10k_pci_irq_disable(ar);
2796 
2797 	ret = ath10k_pci_init_irq(ar);
2798 	if (ret) {
2799 		ath10k_err(ar, "failed to init irqs: %d\n", ret);
2800 		goto err_free_pipes;
2801 	}
2802 
2803 	ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
2804 		    ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
2805 		    ath10k_pci_irq_mode, ath10k_pci_reset_mode);
2806 
2807 	ret = ath10k_pci_request_irq(ar);
2808 	if (ret) {
2809 		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
2810 		goto err_deinit_irq;
2811 	}
2812 
2813 	ret = ath10k_pci_chip_reset(ar);
2814 	if (ret) {
2815 		ath10k_err(ar, "failed to reset chip: %d\n", ret);
2816 		goto err_free_irq;
2817 	}
2818 
2819 	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
2820 	if (chip_id == 0xffffffff) {
2821 		ath10k_err(ar, "failed to get chip id\n");
2822 		goto err_free_irq;
2823 	}
2824 
2825 	if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
2826 		ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
2827 			   pdev->device, chip_id);
2828 		goto err_free_irq;
2829 	}
2830 
2831 	ret = ath10k_core_register(ar, chip_id);
2832 	if (ret) {
2833 		ath10k_err(ar, "failed to register driver core: %d\n", ret);
2834 		goto err_free_irq;
2835 	}
2836 
2837 	return 0;
2838 
2839 err_free_irq:
2840 	ath10k_pci_free_irq(ar);
2841 	ath10k_pci_kill_tasklet(ar);
2842 
2843 err_deinit_irq:
2844 	ath10k_pci_deinit_irq(ar);
2845 
2846 err_free_pipes:
2847 	ath10k_pci_free_pipes(ar);
2848 
2849 err_sleep:
2850 	ath10k_pci_sleep_sync(ar);
2851 	ath10k_pci_release(ar);
2852 
2853 err_core_destroy:
2854 	ath10k_core_destroy(ar);
2855 
2856 	return ret;
2857 }
2858 
2859 static void ath10k_pci_remove(struct pci_dev *pdev)
2860 {
2861 	struct ath10k *ar = pci_get_drvdata(pdev);
2862 	struct ath10k_pci *ar_pci;
2863 
2864 	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
2865 
2866 	if (!ar)
2867 		return;
2868 
2869 	ar_pci = ath10k_pci_priv(ar);
2870 
2871 	if (!ar_pci)
2872 		return;
2873 
2874 	ath10k_core_unregister(ar);
2875 	ath10k_pci_free_irq(ar);
2876 	ath10k_pci_kill_tasklet(ar);
2877 	ath10k_pci_deinit_irq(ar);
2878 	ath10k_pci_ce_deinit(ar);
2879 	ath10k_pci_free_pipes(ar);
2880 	ath10k_pci_sleep_sync(ar);
2881 	ath10k_pci_release(ar);
2882 	ath10k_core_destroy(ar);
2883 }
2884 
2885 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2886 
2887 static struct pci_driver ath10k_pci_driver = {
2888 	.name = "ath10k_pci",
2889 	.id_table = ath10k_pci_id_table,
2890 	.probe = ath10k_pci_probe,
2891 	.remove = ath10k_pci_remove,
2892 };
2893 
2894 static int __init ath10k_pci_init(void)
2895 {
2896 	int ret;
2897 
2898 	ret = pci_register_driver(&ath10k_pci_driver);
2899 	if (ret)
2900 		printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
2901 		       ret);
2902 
2903 	return ret;
2904 }
2905 module_init(ath10k_pci_init);
2906 
2907 static void __exit ath10k_pci_exit(void)
2908 {
2909 	pci_unregister_driver(&ath10k_pci_driver);
2910 }
2911 
2912 module_exit(ath10k_pci_exit);
2913 
2914 MODULE_AUTHOR("Qualcomm Atheros");
2915 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2916 MODULE_LICENSE("Dual BSD/GPL");
2917 
2918 /* QCA988x 2.0 firmware files */
2919 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2920 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
2921 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
2922 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
2923 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
2924 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
2925 
2926 /* QCA6174 2.1 firmware files */
2927 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
2928 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
2929 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
2930 
2931 /* QCA6174 3.1 firmware files */
2932 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
2933 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
2934 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
2935