xref: /openbmc/linux/drivers/net/wireless/ath/ath10k/pci.c (revision af958a38)
1 /*
2  * Copyright (c) 2005-2011 Atheros Communications Inc.
3  * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <linux/pci.h>
19 #include <linux/module.h>
20 #include <linux/interrupt.h>
21 #include <linux/spinlock.h>
22 #include <linux/bitops.h>
23 
24 #include "core.h"
25 #include "debug.h"
26 
27 #include "targaddrs.h"
28 #include "bmi.h"
29 
30 #include "hif.h"
31 #include "htc.h"
32 
33 #include "ce.h"
34 #include "pci.h"
35 
36 enum ath10k_pci_irq_mode {
37 	ATH10K_PCI_IRQ_AUTO = 0,
38 	ATH10K_PCI_IRQ_LEGACY = 1,
39 	ATH10K_PCI_IRQ_MSI = 2,
40 };
41 
42 enum ath10k_pci_reset_mode {
43 	ATH10K_PCI_RESET_AUTO = 0,
44 	ATH10K_PCI_RESET_WARM_ONLY = 1,
45 };
46 
47 static unsigned int ath10k_pci_target_ps;
48 static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
49 static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
50 
51 module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644);
52 MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option");
53 
54 module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
55 MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
56 
57 module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
58 MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
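
/* Usage sketch (illustrative, not part of the original source): the
 * parameters above can be given at module load time, e.g.
 *
 *   modprobe ath10k_pci irq_mode=2 reset_mode=0
 *
 * and, being registered with 0644 permissions, can also be inspected or
 * changed at runtime via /sys/module/ath10k_pci/parameters/.
 */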
59 
60 /* how long to wait for target to initialise, in ms */
61 #define ATH10K_PCI_TARGET_WAIT 3000
62 #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
63 
64 #define QCA988X_2_0_DEVICE_ID	(0x003c)
65 
66 static const struct pci_device_id ath10k_pci_id_table[] = {
67 	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
68 	{0}
69 };
70 
71 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
72 				       u32 *data);
73 
74 static int ath10k_pci_post_rx(struct ath10k *ar);
75 static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
76 					     int num);
77 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
78 static int ath10k_pci_cold_reset(struct ath10k *ar);
79 static int ath10k_pci_warm_reset(struct ath10k *ar);
80 static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
81 static int ath10k_pci_init_irq(struct ath10k *ar);
82 static int ath10k_pci_deinit_irq(struct ath10k *ar);
83 static int ath10k_pci_request_irq(struct ath10k *ar);
84 static void ath10k_pci_free_irq(struct ath10k *ar);
85 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
86 			       struct ath10k_ce_pipe *rx_pipe,
87 			       struct bmi_xfer *xfer);
88 
89 static const struct ce_attr host_ce_config_wlan[] = {
90 	/* CE0: host->target HTC control and raw streams */
91 	{
92 		.flags = CE_ATTR_FLAGS,
93 		.src_nentries = 16,
94 		.src_sz_max = 256,
95 		.dest_nentries = 0,
96 	},
97 
98 	/* CE1: target->host HTT + HTC control */
99 	{
100 		.flags = CE_ATTR_FLAGS,
101 		.src_nentries = 0,
102 		.src_sz_max = 512,
103 		.dest_nentries = 512,
104 	},
105 
106 	/* CE2: target->host WMI */
107 	{
108 		.flags = CE_ATTR_FLAGS,
109 		.src_nentries = 0,
110 		.src_sz_max = 2048,
111 		.dest_nentries = 32,
112 	},
113 
114 	/* CE3: host->target WMI */
115 	{
116 		.flags = CE_ATTR_FLAGS,
117 		.src_nentries = 32,
118 		.src_sz_max = 2048,
119 		.dest_nentries = 0,
120 	},
121 
122 	/* CE4: host->target HTT */
123 	{
124 		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
125 		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
126 		.src_sz_max = 256,
127 		.dest_nentries = 0,
128 	},
129 
130 	/* CE5: unused */
131 	{
132 		.flags = CE_ATTR_FLAGS,
133 		.src_nentries = 0,
134 		.src_sz_max = 0,
135 		.dest_nentries = 0,
136 	},
137 
138 	/* CE6: target autonomous hif_memcpy */
139 	{
140 		.flags = CE_ATTR_FLAGS,
141 		.src_nentries = 0,
142 		.src_sz_max = 0,
143 		.dest_nentries = 0,
144 	},
145 
146 	/* CE7: ce_diag, the Diagnostic Window */
147 	{
148 		.flags = CE_ATTR_FLAGS,
149 		.src_nentries = 2,
150 		.src_sz_max = DIAG_TRANSFER_LIMIT,
151 		.dest_nentries = 2,
152 	},
153 };
154 
155 /* Target firmware's Copy Engine configuration. */
156 static const struct ce_pipe_config target_ce_config_wlan[] = {
157 	/* CE0: host->target HTC control and raw streams */
158 	{
159 		.pipenum = 0,
160 		.pipedir = PIPEDIR_OUT,
161 		.nentries = 32,
162 		.nbytes_max = 256,
163 		.flags = CE_ATTR_FLAGS,
164 		.reserved = 0,
165 	},
166 
167 	/* CE1: target->host HTT + HTC control */
168 	{
169 		.pipenum = 1,
170 		.pipedir = PIPEDIR_IN,
171 		.nentries = 32,
172 		.nbytes_max = 512,
173 		.flags = CE_ATTR_FLAGS,
174 		.reserved = 0,
175 	},
176 
177 	/* CE2: target->host WMI */
178 	{
179 		.pipenum = 2,
180 		.pipedir = PIPEDIR_IN,
181 		.nentries = 32,
182 		.nbytes_max = 2048,
183 		.flags = CE_ATTR_FLAGS,
184 		.reserved = 0,
185 	},
186 
187 	/* CE3: host->target WMI */
188 	{
189 		.pipenum = 3,
190 		.pipedir = PIPEDIR_OUT,
191 		.nentries = 32,
192 		.nbytes_max = 2048,
193 		.flags = CE_ATTR_FLAGS,
194 		.reserved = 0,
195 	},
196 
197 	/* CE4: host->target HTT */
198 	{
199 		.pipenum = 4,
200 		.pipedir = PIPEDIR_OUT,
201 		.nentries = 256,
202 		.nbytes_max = 256,
203 		.flags = CE_ATTR_FLAGS,
204 		.reserved = 0,
205 	},
206 
207 	/* NB: 50% of src nentries, since tx has 2 frags */
208 
209 	/* CE5: unused */
210 	{
211 		.pipenum = 5,
212 		.pipedir = PIPEDIR_OUT,
213 		.nentries = 32,
214 		.nbytes_max = 2048,
215 		.flags = CE_ATTR_FLAGS,
216 		.reserved = 0,
217 	},
218 
219 	/* CE6: Reserved for target autonomous hif_memcpy */
220 	{
221 		.pipenum = 6,
222 		.pipedir = PIPEDIR_INOUT,
223 		.nentries = 32,
224 		.nbytes_max = 4096,
225 		.flags = CE_ATTR_FLAGS,
226 		.reserved = 0,
227 	},
228 
229 	/* CE7 used only by Host */
230 };
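
/* Editorial note: host_ce_config_wlan above is the host's view of each CE
 * (the ring sizes it allocates locally), while target_ce_config_wlan is the
 * image downloaded to the firmware through the diagnostic window in
 * ath10k_pci_init_config() below; the two tables describe the same pipes
 * and are kept in sync by hand. */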
231 
232 static bool ath10k_pci_irq_pending(struct ath10k *ar)
233 {
234 	u32 cause;
235 
236 	/* Check if the shared legacy irq is for us */
237 	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
238 				  PCIE_INTR_CAUSE_ADDRESS);
239 	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
240 		return true;
241 
242 	return false;
243 }
244 
245 static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
246 {
247 	/* IMPORTANT: INTR_CLR register has to be set after
248 	 * INTR_ENABLE is set to 0, otherwise the interrupt cannot
249 	 * really be cleared. */
250 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
251 			   0);
252 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
253 			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
254 
255 	/* IMPORTANT: this extra read transaction is required to
256 	 * flush the posted write buffer. */
257 	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
258 				 PCIE_INTR_ENABLE_ADDRESS);
259 }
260 
261 static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
262 {
263 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
264 			   PCIE_INTR_ENABLE_ADDRESS,
265 			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
266 
267 	/* IMPORTANT: this extra read transaction is required to
268 	 * flush the posted write buffer. */
269 	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
270 				 PCIE_INTR_ENABLE_ADDRESS);
271 }
272 
273 static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
274 {
275 	struct ath10k *ar = arg;
276 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
277 
278 	if (ar_pci->num_msi_intrs == 0) {
279 		if (!ath10k_pci_irq_pending(ar))
280 			return IRQ_NONE;
281 
282 		ath10k_pci_disable_and_clear_legacy_irq(ar);
283 	}
284 
285 	tasklet_schedule(&ar_pci->early_irq_tasklet);
286 
287 	return IRQ_HANDLED;
288 }
289 
290 static int ath10k_pci_request_early_irq(struct ath10k *ar)
291 {
292 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
293 	int ret;
294 
295 	/* Regardless of whether MSI-X/MSI/legacy irqs have been set up, the
296 	 * first interrupt from the irq vector is triggered in all cases for
297 	 * FW indication/errors */
298 	ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
299 			  IRQF_SHARED, "ath10k_pci (early)", ar);
300 	if (ret) {
301 		ath10k_warn("failed to request early irq: %d\n", ret);
302 		return ret;
303 	}
304 
305 	return 0;
306 }
307 
308 static void ath10k_pci_free_early_irq(struct ath10k *ar)
309 {
310 	free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
311 }
312 
313 /*
314  * Diagnostic read/write access is provided for startup/config/debug usage.
315  * Caller must guarantee proper alignment, when applicable, and single user
316  * at any moment.
317  */
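/* Usage sketch (illustrative): ath10k_pci_hif_dump_area() below uses this
 * routine to fetch the firmware crash dump, roughly:
 *
 *   host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
 *   ret = ath10k_pci_diag_read_mem(ar, host_addr, &reg_dump_area,
 *                                  sizeof(u32));
 */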
318 static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
319 				    int nbytes)
320 {
321 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
322 	int ret = 0;
323 	u32 buf;
324 	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
325 	unsigned int id;
326 	unsigned int flags;
327 	struct ath10k_ce_pipe *ce_diag;
328 	/* Host buffer address in CE space */
329 	u32 ce_data;
330 	dma_addr_t ce_data_base = 0;
331 	void *data_buf = NULL;
332 	int i;
333 
334 	/*
335 	 * This code cannot handle reads to non-memory space. Redirect to the
336 	 * register read fn but preserve the multi-word read capability of
337 	 * this fn
338 	 */
339 	if (address < DRAM_BASE_ADDRESS) {
340 		if (!IS_ALIGNED(address, 4) ||
341 		    !IS_ALIGNED((unsigned long)data, 4))
342 			return -EIO;
343 
344 		while ((nbytes >= 4) &&  ((ret = ath10k_pci_diag_read_access(
345 					   ar, address, (u32 *)data)) == 0)) {
346 			nbytes -= sizeof(u32);
347 			address += sizeof(u32);
348 			data += sizeof(u32);
349 		}
350 		return ret;
351 	}
352 
353 	ce_diag = ar_pci->ce_diag;
354 
355 	/*
356 	 * Allocate a temporary bounce buffer to hold caller's data
357 	 * to be DMA'ed from Target. This guarantees
358 	 *   1) 4-byte alignment
359 	 *   2) Buffer in DMA-able space
360 	 */
361 	orig_nbytes = nbytes;
362 	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
363 						       orig_nbytes,
364 						       &ce_data_base,
365 						       GFP_ATOMIC);
366 
367 	if (!data_buf) {
368 		ret = -ENOMEM;
369 		goto done;
370 	}
371 	memset(data_buf, 0, orig_nbytes);
372 
373 	remaining_bytes = orig_nbytes;
374 	ce_data = ce_data_base;
375 	while (remaining_bytes) {
376 		nbytes = min_t(unsigned int, remaining_bytes,
377 			       DIAG_TRANSFER_LIMIT);
378 
379 		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
380 		if (ret != 0)
381 			goto done;
382 
383 		/* Request CE to send from Target(!) address to Host buffer */
384 		/*
385 		 * The address supplied by the caller is in the
386 		 * Target CPU virtual address space.
387 		 *
388 		 * In order to use this address with the diagnostic CE,
389 		 * convert it from Target CPU virtual address space
390 		 * to CE address space
391 		 */
392 		ath10k_pci_wake(ar);
393 		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
394 						     address);
395 		ath10k_pci_sleep(ar);
396 
397 	ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, 0);
399 		if (ret)
400 			goto done;
401 
402 		i = 0;
403 		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
404 						     &completed_nbytes,
405 						     &id) != 0) {
406 			mdelay(1);
407 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
408 				ret = -EBUSY;
409 				goto done;
410 			}
411 		}
412 
413 		if (nbytes != completed_nbytes) {
414 			ret = -EIO;
415 			goto done;
416 		}
417 
418 		if (buf != (u32) address) {
419 			ret = -EIO;
420 			goto done;
421 		}
422 
423 		i = 0;
424 		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
425 						     &completed_nbytes,
426 						     &id, &flags) != 0) {
427 			mdelay(1);
428 
429 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
430 				ret = -EBUSY;
431 				goto done;
432 			}
433 		}
434 
435 		if (nbytes != completed_nbytes) {
436 			ret = -EIO;
437 			goto done;
438 		}
439 
440 		if (buf != ce_data) {
441 			ret = -EIO;
442 			goto done;
443 		}
444 
445 		remaining_bytes -= nbytes;
446 		address += nbytes;
447 		ce_data += nbytes;
448 	}
449 
450 done:
451 	if (ret == 0) {
452 		/* Copy data from allocated DMA buf to caller's buf */
453 		WARN_ON_ONCE(orig_nbytes & 3);
454 		for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
455 			((u32 *)data)[i] =
456 				__le32_to_cpu(((__le32 *)data_buf)[i]);
457 		}
458 	} else
459 		ath10k_warn("failed to read diag value at 0x%x: %d\n",
460 			    address, ret);
461 
462 	if (data_buf)
463 		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
464 				  ce_data_base);
465 
466 	return ret;
467 }
468 
469 /* Read 4-byte aligned data from Target memory or register */
470 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
471 				       u32 *data)
472 {
473 	/* Assume range doesn't cross this boundary */
474 	if (address >= DRAM_BASE_ADDRESS)
475 		return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));
476 
477 	ath10k_pci_wake(ar);
478 	*data = ath10k_pci_read32(ar, address);
479 	ath10k_pci_sleep(ar);
480 	return 0;
481 }
482 
483 static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
484 				     const void *data, int nbytes)
485 {
486 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
487 	int ret = 0;
488 	u32 buf;
489 	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
490 	unsigned int id;
491 	unsigned int flags;
492 	struct ath10k_ce_pipe *ce_diag;
493 	void *data_buf = NULL;
494 	u32 ce_data;	/* Host buffer address in CE space */
495 	dma_addr_t ce_data_base = 0;
496 	int i;
497 
498 	ce_diag = ar_pci->ce_diag;
499 
500 	/*
501 	 * Allocate a temporary bounce buffer to hold caller's data
502 	 * to be DMA'ed to Target. This guarantees
503 	 *   1) 4-byte alignment
504 	 *   2) Buffer in DMA-able space
505 	 */
506 	orig_nbytes = nbytes;
507 	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
508 						       orig_nbytes,
509 						       &ce_data_base,
510 						       GFP_ATOMIC);
511 	if (!data_buf) {
512 		ret = -ENOMEM;
513 		goto done;
514 	}
515 
516 	/* Copy caller's data to allocated DMA buf */
517 	WARN_ON_ONCE(orig_nbytes & 3);
518 	for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
519 		((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
520 
521 	/*
522 	 * The address supplied by the caller is in the
523 	 * Target CPU virtual address space.
524 	 *
525 	 * In order to use this address with the diagnostic CE,
526 	 * convert it from
527 	 *    Target CPU virtual address space
528 	 * to
529 	 *    CE address space
530 	 */
531 	ath10k_pci_wake(ar);
532 	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
533 	ath10k_pci_sleep(ar);
534 
535 	remaining_bytes = orig_nbytes;
536 	ce_data = ce_data_base;
537 	while (remaining_bytes) {
538 		/* FIXME: check cast */
539 		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
540 
541 		/* Set up to receive directly into Target(!) address */
542 		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
543 		if (ret != 0)
544 			goto done;
545 
546 		/*
547 		 * Request CE to send caller-supplied data that
548 		 * was copied to bounce buffer to Target(!) address.
549 		 */
550 		ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
551 				     nbytes, 0, 0);
552 		if (ret != 0)
553 			goto done;
554 
555 		i = 0;
556 		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
557 						     &completed_nbytes,
558 						     &id) != 0) {
559 			mdelay(1);
560 
561 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
562 				ret = -EBUSY;
563 				goto done;
564 			}
565 		}
566 
567 		if (nbytes != completed_nbytes) {
568 			ret = -EIO;
569 			goto done;
570 		}
571 
572 		if (buf != ce_data) {
573 			ret = -EIO;
574 			goto done;
575 		}
576 
577 		i = 0;
578 		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
579 						     &completed_nbytes,
580 						     &id, &flags) != 0) {
581 			mdelay(1);
582 
583 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
584 				ret = -EBUSY;
585 				goto done;
586 			}
587 		}
588 
589 		if (nbytes != completed_nbytes) {
590 			ret = -EIO;
591 			goto done;
592 		}
593 
594 		if (buf != address) {
595 			ret = -EIO;
596 			goto done;
597 		}
598 
599 		remaining_bytes -= nbytes;
600 		address += nbytes;
601 		ce_data += nbytes;
602 	}
603 
604 done:
605 	if (data_buf) {
606 		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
607 				  ce_data_base);
608 	}
609 
610 	if (ret != 0)
611 		ath10k_warn("failed to write diag value at 0x%x: %d\n",
612 			    address, ret);
613 
614 	return ret;
615 }
616 
617 /* Write 4-byte aligned data to Target memory or register */
618 static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
619 					u32 data)
620 {
621 	/* Assume range doesn't cross this boundary */
622 	if (address >= DRAM_BASE_ADDRESS)
623 		return ath10k_pci_diag_write_mem(ar, address, &data,
624 						 sizeof(u32));
625 
626 	ath10k_pci_wake(ar);
627 	ath10k_pci_write32(ar, address, data);
628 	ath10k_pci_sleep(ar);
629 	return 0;
630 }
631 
632 static bool ath10k_pci_target_is_awake(struct ath10k *ar)
633 {
634 	void __iomem *mem = ath10k_pci_priv(ar)->mem;
635 	u32 val;
636 	val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
637 		       RTC_STATE_ADDRESS);
638 	return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
639 }
640 
641 int ath10k_do_pci_wake(struct ath10k *ar)
642 {
643 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
644 	void __iomem *pci_addr = ar_pci->mem;
645 	int tot_delay = 0;
646 	int curr_delay = 5;
647 
648 	if (atomic_read(&ar_pci->keep_awake_count) == 0) {
649 		/* Force AWAKE */
650 		iowrite32(PCIE_SOC_WAKE_V_MASK,
651 			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
652 			  PCIE_SOC_WAKE_ADDRESS);
653 	}
654 	atomic_inc(&ar_pci->keep_awake_count);
655 
656 	if (ar_pci->verified_awake)
657 		return 0;
658 
659 	for (;;) {
660 		if (ath10k_pci_target_is_awake(ar)) {
661 			ar_pci->verified_awake = true;
662 			return 0;
663 		}
664 
665 		if (tot_delay > PCIE_WAKE_TIMEOUT) {
666 			ath10k_warn("target took longer %d us to wake up (awake count %d)\n",
667 				    PCIE_WAKE_TIMEOUT,
668 				    atomic_read(&ar_pci->keep_awake_count));
669 			return -ETIMEDOUT;
670 		}
671 
672 		udelay(curr_delay);
673 		tot_delay += curr_delay;
674 
675 		if (curr_delay < 50)
676 			curr_delay += 5;
677 	}
678 }
679 
680 void ath10k_do_pci_sleep(struct ath10k *ar)
681 {
682 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
683 	void __iomem *pci_addr = ar_pci->mem;
684 
685 	if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
686 		/* Allow sleep */
687 		ar_pci->verified_awake = false;
688 		iowrite32(PCIE_SOC_WAKE_RESET,
689 			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
690 			  PCIE_SOC_WAKE_ADDRESS);
691 	}
692 }
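
/* Typical usage (as seen throughout this file): register accesses are
 * bracketed by wake/sleep so the keep_awake refcount holds the SoC awake
 * only while it is actually needed, e.g.
 *
 *   ath10k_pci_wake(ar);
 *   *data = ath10k_pci_read32(ar, address);
 *   ath10k_pci_sleep(ar);
 */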
693 
694 /* Called by lower (CE) layer when a send to Target completes. */
695 static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
696 {
697 	struct ath10k *ar = ce_state->ar;
698 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
699 	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
700 	void *transfer_context;
701 	u32 ce_data;
702 	unsigned int nbytes;
703 	unsigned int transfer_id;
704 
705 	while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
706 					     &ce_data, &nbytes,
707 					     &transfer_id) == 0) {
708 		/* no need to call tx completion for NULL pointers */
709 		if (transfer_context == NULL)
710 			continue;
711 
712 		cb->tx_completion(ar, transfer_context, transfer_id);
713 	}
714 }
715 
716 /* Called by lower (CE) layer when data is received from the Target. */
717 static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
718 {
719 	struct ath10k *ar = ce_state->ar;
720 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
721 	struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
722 	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
723 	struct sk_buff *skb;
724 	void *transfer_context;
725 	u32 ce_data;
726 	unsigned int nbytes, max_nbytes;
727 	unsigned int transfer_id;
728 	unsigned int flags;
729 	int err, num_replenish = 0;
730 
731 	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
732 					     &ce_data, &nbytes, &transfer_id,
733 					     &flags) == 0) {
734 		num_replenish++;
735 		skb = transfer_context;
736 		max_nbytes = skb->len + skb_tailroom(skb);
737 		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
738 				 max_nbytes, DMA_FROM_DEVICE);
739 
740 		if (unlikely(max_nbytes < nbytes)) {
741 			ath10k_warn("rxed more than expected (nbytes %d, max %d)",
742 				    nbytes, max_nbytes);
743 			dev_kfree_skb_any(skb);
744 			continue;
745 		}
746 
747 		skb_put(skb, nbytes);
748 		cb->rx_completion(ar, skb, pipe_info->pipe_num);
749 	}
750 
751 	err = ath10k_pci_post_rx_pipe(pipe_info, num_replenish);
752 	if (unlikely(err)) {
753 		/* FIXME: retry */
754 		ath10k_warn("failed to replenish CE rx ring %d (%d bufs): %d\n",
755 			    pipe_info->pipe_num, num_replenish, err);
756 	}
757 }
758 
759 static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
760 				struct ath10k_hif_sg_item *items, int n_items)
761 {
762 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
763 	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
764 	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
765 	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
766 	unsigned int nentries_mask;
767 	unsigned int sw_index;
768 	unsigned int write_index;
769 	int err, i = 0;
770 
771 	spin_lock_bh(&ar_pci->ce_lock);
772 
773 	nentries_mask = src_ring->nentries_mask;
774 	sw_index = src_ring->sw_index;
775 	write_index = src_ring->write_index;
776 
777 	if (unlikely(CE_RING_DELTA(nentries_mask,
778 				   write_index, sw_index - 1) < n_items)) {
779 		err = -ENOBUFS;
780 		goto err;
781 	}
782 
783 	for (i = 0; i < n_items - 1; i++) {
784 		ath10k_dbg(ATH10K_DBG_PCI,
785 			   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
786 			   i, items[i].paddr, items[i].len, n_items);
787 		ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
788 				items[i].vaddr, items[i].len);
789 
790 		err = ath10k_ce_send_nolock(ce_pipe,
791 					    items[i].transfer_context,
792 					    items[i].paddr,
793 					    items[i].len,
794 					    items[i].transfer_id,
795 					    CE_SEND_FLAG_GATHER);
796 		if (err)
797 			goto err;
798 	}
799 
800 	/* `i` is equal to `n_items - 1` after the for() loop */
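	/* The preceding items were queued with CE_SEND_FLAG_GATHER; sending
	 * the last item below without that flag terminates the gather list,
	 * so the CE treats the whole set as one scatter-gather transfer. */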
801 
802 	ath10k_dbg(ATH10K_DBG_PCI,
803 		   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
804 		   i, items[i].paddr, items[i].len, n_items);
805 	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
806 			items[i].vaddr, items[i].len);
807 
808 	err = ath10k_ce_send_nolock(ce_pipe,
809 				    items[i].transfer_context,
810 				    items[i].paddr,
811 				    items[i].len,
812 				    items[i].transfer_id,
813 				    0);
814 	if (err)
815 		goto err;
816 
817 	spin_unlock_bh(&ar_pci->ce_lock);
818 	return 0;
819 
820 err:
821 	for (; i > 0; i--)
822 		__ath10k_ce_send_revert(ce_pipe);
823 
824 	spin_unlock_bh(&ar_pci->ce_lock);
825 	return err;
826 }
827 
828 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
829 {
830 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
831 
832 	ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n");
833 
834 	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
835 }
836 
837 static void ath10k_pci_hif_dump_area(struct ath10k *ar)
838 {
839 	u32 reg_dump_area = 0;
840 	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
841 	u32 host_addr;
842 	int ret;
843 	u32 i;
844 
845 	ath10k_err("firmware crashed!\n");
846 	ath10k_err("hardware name %s version 0x%x\n",
847 		   ar->hw_params.name, ar->target_version);
848 	ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);
849 
850 	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
851 	ret = ath10k_pci_diag_read_mem(ar, host_addr,
852 				       &reg_dump_area, sizeof(u32));
853 	if (ret) {
854 		ath10k_err("failed to read FW dump area address: %d\n", ret);
855 		return;
856 	}
857 
858 	ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);
859 
860 	ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
861 				       &reg_dump_values[0],
862 				       REG_DUMP_COUNT_QCA988X * sizeof(u32));
863 	if (ret != 0) {
864 		ath10k_err("failed to read FW dump area: %d\n", ret);
865 		return;
866 	}
867 
868 	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
869 
870 	ath10k_err("target Register Dump\n");
871 	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
872 		ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
873 			   i,
874 			   reg_dump_values[i],
875 			   reg_dump_values[i + 1],
876 			   reg_dump_values[i + 2],
877 			   reg_dump_values[i + 3]);
878 
879 	queue_work(ar->workqueue, &ar->restart_work);
880 }
881 
882 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
883 					       int force)
884 {
885 	ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n");
886 
887 	if (!force) {
888 		int resources;
889 		/*
890 		 * Decide whether to actually poll for completions, or just
891 		 * wait for a later chance.
892 		 * If there seem to be plenty of resources left, then just wait
893 		 * since checking involves reading a CE register, which is a
894 		 * relatively expensive operation.
895 		 */
896 		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
897 
898 		/*
899 		 * If at least 50% of the total resources are still available,
900 		 * don't bother checking again yet.
901 		 */
902 		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
903 			return;
904 	}
905 	ath10k_ce_per_engine_service(ar, pipe);
906 }
907 
908 static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
909 					 struct ath10k_hif_cb *callbacks)
910 {
911 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
912 
913 	ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");
914 
915 	memcpy(&ar_pci->msg_callbacks_current, callbacks,
916 	       sizeof(ar_pci->msg_callbacks_current));
917 }
918 
919 static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
920 {
921 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
922 	const struct ce_attr *attr;
923 	struct ath10k_pci_pipe *pipe_info;
924 	int pipe_num, disable_interrupts;
925 
926 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
927 		pipe_info = &ar_pci->pipe_info[pipe_num];
928 
929 		/* Handle Diagnostic CE specially */
930 		if (pipe_info->ce_hdl == ar_pci->ce_diag)
931 			continue;
932 
933 		attr = &host_ce_config_wlan[pipe_num];
934 
935 		if (attr->src_nentries) {
936 			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
937 			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
938 						   ath10k_pci_ce_send_done,
939 						   disable_interrupts);
940 		}
941 
942 		if (attr->dest_nentries)
943 			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
944 						   ath10k_pci_ce_recv_data);
945 	}
946 
947 	return 0;
948 }
949 
950 static void ath10k_pci_kill_tasklet(struct ath10k *ar)
951 {
952 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
953 	int i;
954 
955 	tasklet_kill(&ar_pci->intr_tq);
956 	tasklet_kill(&ar_pci->msi_fw_err);
957 	tasklet_kill(&ar_pci->early_irq_tasklet);
958 
959 	for (i = 0; i < CE_COUNT; i++)
960 		tasklet_kill(&ar_pci->pipe_info[i].intr);
961 }
962 
963 /* TODO - temporary mapping while we have too few CEs */
964 static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
965 					      u16 service_id, u8 *ul_pipe,
966 					      u8 *dl_pipe, int *ul_is_polled,
967 					      int *dl_is_polled)
968 {
969 	int ret = 0;
970 
971 	ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n");
972 
973 	/* polling for received messages not supported */
974 	*dl_is_polled = 0;
975 
976 	switch (service_id) {
977 	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
978 		/*
979 		 * Host->target HTT gets its own pipe, so it can be polled
980 		 * while other pipes are interrupt driven.
981 		 */
982 		*ul_pipe = 4;
983 		/*
984 		 * Use the same target->host pipe for HTC ctrl, HTC raw
985 		 * streams, and HTT.
986 		 */
987 		*dl_pipe = 1;
988 		break;
989 
990 	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
991 	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
992 		/*
993 		 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
994 		 * HTC_CTRL_RSVD_SVC could share the same pipe as the
995 		 * WMI services.  So, if another CE is needed, change
996 		 * this to *ul_pipe = 3, which frees up CE 0.
997 		 */
998 		/* *ul_pipe = 3; */
999 		*ul_pipe = 0;
1000 		*dl_pipe = 1;
1001 		break;
1002 
1003 	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
1004 	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
1005 	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
1006 	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
1007 
1008 	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
1009 		*ul_pipe = 3;
1010 		*dl_pipe = 2;
1011 		break;
1012 
1013 		/* pipe 5 unused   */
1014 		/* pipe 6 reserved */
1015 		/* pipe 7 reserved */
1016 
1017 	default:
1018 		ret = -1;
1019 		break;
1020 	}
1021 	*ul_is_polled =
1022 		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
1023 
1024 	return ret;
1025 }
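
/* Editorial note: the mapping above is the host-side mirror of
 * target_service_to_ce_map_wlan below, which is downloaded to the target
 * in ath10k_pci_init_config(); both must describe the same service<->CE
 * assignment. */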
1026 
1027 static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1028 						u8 *ul_pipe, u8 *dl_pipe)
1029 {
1030 	int ul_is_polled, dl_is_polled;
1031 
1032 	ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n");
1033 
1034 	(void)ath10k_pci_hif_map_service_to_pipe(ar,
1035 						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1036 						 ul_pipe,
1037 						 dl_pipe,
1038 						 &ul_is_polled,
1039 						 &dl_is_polled);
1040 }
1041 
1042 static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
1043 				   int num)
1044 {
1045 	struct ath10k *ar = pipe_info->hif_ce_state;
1046 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1047 	struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
1048 	struct sk_buff *skb;
1049 	dma_addr_t ce_data;
1050 	int i, ret = 0;
1051 
1052 	if (pipe_info->buf_sz == 0)
1053 		return 0;
1054 
1055 	for (i = 0; i < num; i++) {
1056 		skb = dev_alloc_skb(pipe_info->buf_sz);
1057 		if (!skb) {
1058 			ath10k_warn("failed to allocate skbuff for pipe %d\n",
1059 				    pipe_info->pipe_num);
1060 			ret = -ENOMEM;
1061 			goto err;
1062 		}
1063 
1064 		WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
1065 
1066 		ce_data = dma_map_single(ar->dev, skb->data,
1067 					 skb->len + skb_tailroom(skb),
1068 					 DMA_FROM_DEVICE);
1069 
1070 		if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
1071 			ath10k_warn("failed to DMA map sk_buff\n");
1072 			dev_kfree_skb_any(skb);
1073 			ret = -EIO;
1074 			goto err;
1075 		}
1076 
1077 		ATH10K_SKB_CB(skb)->paddr = ce_data;
1078 
1079 		pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
1080 					       pipe_info->buf_sz,
1081 					       PCI_DMA_FROMDEVICE);
1082 
1083 		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
1084 						 ce_data);
1085 		if (ret) {
1086 			ath10k_warn("failed to enqueue to pipe %d: %d\n",
1087 				    pipe_info->pipe_num, ret);
1088 			goto err;
1089 		}
1090 	}
1091 
1092 	return ret;
1093 
1094 err:
1095 	ath10k_pci_rx_pipe_cleanup(pipe_info);
1096 	return ret;
1097 }
1098 
1099 static int ath10k_pci_post_rx(struct ath10k *ar)
1100 {
1101 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1102 	struct ath10k_pci_pipe *pipe_info;
1103 	const struct ce_attr *attr;
1104 	int pipe_num, ret = 0;
1105 
1106 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1107 		pipe_info = &ar_pci->pipe_info[pipe_num];
1108 		attr = &host_ce_config_wlan[pipe_num];
1109 
1110 		if (attr->dest_nentries == 0)
1111 			continue;
1112 
1113 		ret = ath10k_pci_post_rx_pipe(pipe_info,
1114 					      attr->dest_nentries - 1);
1115 		if (ret) {
1116 			ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
1117 				    pipe_num, ret);
1118 
1119 			for (; pipe_num >= 0; pipe_num--) {
1120 				pipe_info = &ar_pci->pipe_info[pipe_num];
1121 				ath10k_pci_rx_pipe_cleanup(pipe_info);
1122 			}
1123 			return ret;
1124 		}
1125 	}
1126 
1127 	return 0;
1128 }
1129 
1130 static int ath10k_pci_hif_start(struct ath10k *ar)
1131 {
1132 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1133 	int ret, ret_early;
1134 
1135 	ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");
1136 
1137 	ath10k_pci_free_early_irq(ar);
1138 	ath10k_pci_kill_tasklet(ar);
1139 
1140 	ret = ath10k_pci_request_irq(ar);
1141 	if (ret) {
1142 		ath10k_warn("failed to request irqs: %d\n", ret);
1144 		goto err_early_irq;
1145 	}
1146 
1147 	ret = ath10k_pci_setup_ce_irq(ar);
1148 	if (ret) {
1149 		ath10k_warn("failed to setup CE interrupts: %d\n", ret);
1150 		goto err_stop;
1151 	}
1152 
1153 	/* Post buffers once to start things off. */
1154 	ret = ath10k_pci_post_rx(ar);
1155 	if (ret) {
1156 		ath10k_warn("failed to post RX buffers for all pipes: %d\n",
1157 			    ret);
1158 		goto err_stop;
1159 	}
1160 
1161 	ar_pci->started = 1;
1162 	return 0;
1163 
1164 err_stop:
1165 	ath10k_ce_disable_interrupts(ar);
1166 	ath10k_pci_free_irq(ar);
1167 	ath10k_pci_kill_tasklet(ar);
1168 err_early_irq:
1169 	/* Though there should be no interrupts (device was reset),
1170 	 * power_down() expects the early IRQ to be installed as per the
1171 	 * driver lifecycle. */
1172 	ret_early = ath10k_pci_request_early_irq(ar);
1173 	if (ret_early)
1174 		ath10k_warn("failed to re-enable early irq: %d\n", ret_early);
1175 
1176 	return ret;
1177 }
1178 
1179 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1180 {
1181 	struct ath10k *ar;
1182 	struct ath10k_pci *ar_pci;
1183 	struct ath10k_ce_pipe *ce_hdl;
1184 	u32 buf_sz;
1185 	struct sk_buff *netbuf;
1186 	u32 ce_data;
1187 
1188 	buf_sz = pipe_info->buf_sz;
1189 
1190 	/* Unused Copy Engine */
1191 	if (buf_sz == 0)
1192 		return;
1193 
1194 	ar = pipe_info->hif_ce_state;
1195 	ar_pci = ath10k_pci_priv(ar);
1196 
1197 	if (!ar_pci->started)
1198 		return;
1199 
1200 	ce_hdl = pipe_info->ce_hdl;
1201 
1202 	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
1203 					  &ce_data) == 0) {
1204 		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
1205 				 netbuf->len + skb_tailroom(netbuf),
1206 				 DMA_FROM_DEVICE);
1207 		dev_kfree_skb_any(netbuf);
1208 	}
1209 }
1210 
1211 static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1212 {
1213 	struct ath10k *ar;
1214 	struct ath10k_pci *ar_pci;
1215 	struct ath10k_ce_pipe *ce_hdl;
1216 	struct sk_buff *netbuf;
1217 	u32 ce_data;
1218 	unsigned int nbytes;
1219 	unsigned int id;
1220 	u32 buf_sz;
1221 
1222 	buf_sz = pipe_info->buf_sz;
1223 
1224 	/* Unused Copy Engine */
1225 	if (buf_sz == 0)
1226 		return;
1227 
1228 	ar = pipe_info->hif_ce_state;
1229 	ar_pci = ath10k_pci_priv(ar);
1230 
1231 	if (!ar_pci->started)
1232 		return;
1233 
1234 	ce_hdl = pipe_info->ce_hdl;
1235 
1236 	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
1237 					  &ce_data, &nbytes, &id) == 0) {
1238 		/* no need to call tx completion for NULL pointers */
1239 		if (!netbuf)
1240 			continue;
1241 
1242 		ar_pci->msg_callbacks_current.tx_completion(ar,
1243 							    netbuf,
1244 							    id);
1245 	}
1246 }
1247 
1248 /*
1249  * Cleanup residual buffers for device shutdown:
1250  *    buffers that were enqueued for receive
1251  *    buffers that were to be sent
1252  * Note: Buffers that had completed but which were
1253  * not yet processed are on a completion queue. They
1254  * are handled when the completion thread shuts down.
1255  */
1256 static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1257 {
1258 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1259 	int pipe_num;
1260 
1261 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1262 		struct ath10k_pci_pipe *pipe_info;
1263 
1264 		pipe_info = &ar_pci->pipe_info[pipe_num];
1265 		ath10k_pci_rx_pipe_cleanup(pipe_info);
1266 		ath10k_pci_tx_pipe_cleanup(pipe_info);
1267 	}
1268 }
1269 
1270 static void ath10k_pci_ce_deinit(struct ath10k *ar)
1271 {
1272 	int i;
1273 
1274 	for (i = 0; i < CE_COUNT; i++)
1275 		ath10k_ce_deinit_pipe(ar, i);
1276 }
1277 
1278 static void ath10k_pci_hif_stop(struct ath10k *ar)
1279 {
1280 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1281 	int ret;
1282 
1283 	ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");
1284 
1285 	if (WARN_ON(!ar_pci->started))
1286 		return;
1287 
1288 	ret = ath10k_ce_disable_interrupts(ar);
1289 	if (ret)
1290 		ath10k_warn("failed to disable CE interrupts: %d\n", ret);
1291 
1292 	ath10k_pci_free_irq(ar);
1293 	ath10k_pci_kill_tasklet(ar);
1294 
1295 	ret = ath10k_pci_request_early_irq(ar);
1296 	if (ret)
1297 		ath10k_warn("failed to re-enable early irq: %d\n", ret);
1298 
1299 	/* At this point, asynchronous threads are stopped, the target should
1300 	 * not DMA nor interrupt. We process the leftovers and then free
1301 	 * everything else up. */
1302 
1303 	ath10k_pci_buffer_cleanup(ar);
1304 
1305 	/* Make sure the device won't access any structures on the host by
1306 	 * resetting it. The device was fed with PCI CE ringbuffer
1307 	 * configuration during init. If ringbuffers are freed and the device
1308 	 * were to access them this could lead to memory corruption on the
1309 	 * host. */
1310 	ath10k_pci_warm_reset(ar);
1311 
1312 	ar_pci->started = 0;
1313 }
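
/* IRQ lifecycle note: power_up() installs the early (shared, pre-CE) IRQ
 * handler, hif_start() swaps it for the regular handlers, and hif_stop()
 * swaps back, so exactly one handler owns the interrupt line at any point
 * between power_up() and power_down(). */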
1314 
1315 static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1316 					   void *req, u32 req_len,
1317 					   void *resp, u32 *resp_len)
1318 {
1319 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1320 	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1321 	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1322 	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1323 	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
1324 	dma_addr_t req_paddr = 0;
1325 	dma_addr_t resp_paddr = 0;
1326 	struct bmi_xfer xfer = {};
1327 	void *treq, *tresp = NULL;
1328 	int ret = 0;
1329 
1330 	might_sleep();
1331 
1332 	if (resp && !resp_len)
1333 		return -EINVAL;
1334 
1335 	if (resp && resp_len && *resp_len == 0)
1336 		return -EINVAL;
1337 
1338 	treq = kmemdup(req, req_len, GFP_KERNEL);
1339 	if (!treq)
1340 		return -ENOMEM;
1341 
1342 	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1343 	ret = dma_mapping_error(ar->dev, req_paddr);
1344 	if (ret)
1345 		goto err_dma;
1346 
1347 	if (resp && resp_len) {
1348 		tresp = kzalloc(*resp_len, GFP_KERNEL);
1349 		if (!tresp) {
1350 			ret = -ENOMEM;
1351 			goto err_req;
1352 		}
1353 
1354 		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1355 					    DMA_FROM_DEVICE);
1356 		ret = dma_mapping_error(ar->dev, resp_paddr);
1357 		if (ret)
1358 			goto err_req;
1359 
1360 		xfer.wait_for_resp = true;
1361 		xfer.resp_len = 0;
1362 
1363 		ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
1364 	}
1365 
1366 	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1367 	if (ret)
1368 		goto err_resp;
1369 
1370 	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
1371 	if (ret) {
1372 		u32 unused_buffer;
1373 		unsigned int unused_nbytes;
1374 		unsigned int unused_id;
1375 
1376 		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1377 					   &unused_nbytes, &unused_id);
1378 	} else {
1379 		/* non-zero means we did not time out */
1380 		ret = 0;
1381 	}
1382 
1383 err_resp:
1384 	if (resp) {
1385 		u32 unused_buffer;
1386 
1387 		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1388 		dma_unmap_single(ar->dev, resp_paddr,
1389 				 *resp_len, DMA_FROM_DEVICE);
1390 	}
1391 err_req:
1392 	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1393 
1394 	if (ret == 0 && resp_len) {
1395 		*resp_len = min(*resp_len, xfer.resp_len);
1396 		memcpy(resp, tresp, xfer.resp_len);
1397 	}
1398 err_dma:
1399 	kfree(treq);
1400 	kfree(tresp);
1401 
1402 	return ret;
1403 }
1404 
1405 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
1406 {
1407 	struct bmi_xfer *xfer;
1408 	u32 ce_data;
1409 	unsigned int nbytes;
1410 	unsigned int transfer_id;
1411 
1412 	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1413 					  &nbytes, &transfer_id))
1414 		return;
1415 
1416 	xfer->tx_done = true;
1417 }
1418 
1419 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1420 {
1421 	struct bmi_xfer *xfer;
1422 	u32 ce_data;
1423 	unsigned int nbytes;
1424 	unsigned int transfer_id;
1425 	unsigned int flags;
1426 
1427 	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1428 					  &nbytes, &transfer_id, &flags))
1429 		return;
1430 
1431 	if (!xfer->wait_for_resp) {
1432 		ath10k_warn("unexpected: BMI data received; ignoring\n");
1433 		return;
1434 	}
1435 
1436 	xfer->resp_len = nbytes;
1437 	xfer->rx_done = true;
1438 }
1439 
1440 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1441 			       struct ath10k_ce_pipe *rx_pipe,
1442 			       struct bmi_xfer *xfer)
1443 {
1444 	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1445 
1446 	while (time_before_eq(jiffies, timeout)) {
1447 		ath10k_pci_bmi_send_done(tx_pipe);
1448 		ath10k_pci_bmi_recv_data(rx_pipe);
1449 
1450 		if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
1451 			return 0;
1452 
1453 		schedule();
1454 	}
1455 
1456 	return -ETIMEDOUT;
1457 }
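
/* BMI runs during early boot before the CE interrupt path is in service,
 * so the wait loop above simply polls both pipes (yielding via schedule())
 * until the send completes and, when a response is expected, one arrives,
 * or BMI_COMMUNICATION_TIMEOUT_HZ elapses. */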
1458 
1459 /*
1460  * Map from service/endpoint to Copy Engine.
1461  * This table is derived from the CE_PCI TABLE, above.
1462  * It is passed to the Target at startup for use by firmware.
1463  */
1464 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1465 	{
1466 		 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1467 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1468 		 3,
1469 	},
1470 	{
1471 		 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1472 		 PIPEDIR_IN,		/* in = DL = target -> host */
1473 		 2,
1474 	},
1475 	{
1476 		 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1477 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1478 		 3,
1479 	},
1480 	{
1481 		 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1482 		 PIPEDIR_IN,		/* in = DL = target -> host */
1483 		 2,
1484 	},
1485 	{
1486 		 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1487 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1488 		 3,
1489 	},
1490 	{
1491 		 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1492 		 PIPEDIR_IN,		/* in = DL = target -> host */
1493 		 2,
1494 	},
1495 	{
1496 		 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1497 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1498 		 3,
1499 	},
1500 	{
1501 		 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1502 		 PIPEDIR_IN,		/* in = DL = target -> host */
1503 		 2,
1504 	},
1505 	{
1506 		 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1507 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1508 		 3,
1509 	},
1510 	{
1511 		 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1512 		 PIPEDIR_IN,		/* in = DL = target -> host */
1513 		 2,
1514 	},
1515 	{
1516 		 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1517 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1518 		 0,		/* could be moved to 3 (share with WMI) */
1519 	},
1520 	{
1521 		 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1522 		 PIPEDIR_IN,		/* in = DL = target -> host */
1523 		 1,
1524 	},
1525 	{
1526 		 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
1527 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1528 		 0,
1529 	},
1530 	{
1531 		 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
1532 		 PIPEDIR_IN,		/* in = DL = target -> host */
1533 		 1,
1534 	},
1535 	{
1536 		 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1537 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1538 		 4,
1539 	},
1540 	{
1541 		 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1542 		 PIPEDIR_IN,		/* in = DL = target -> host */
1543 		 1,
1544 	},
1545 
1546 	/* (Additions here) */
1547 
1548 	{				/* Must be last */
1549 		 0,
1550 		 0,
1551 		 0,
1552 	},
1553 };
1554 
1555 /*
1556  * Send an interrupt to the device to wake up the Target CPU
1557  * so it has an opportunity to notice any changed state.
1558  */
1559 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1560 {
1561 	int ret;
1562 	u32 core_ctrl;
1563 
1564 	ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1565 					      CORE_CTRL_ADDRESS,
1566 					  &core_ctrl);
1567 	if (ret) {
1568 		ath10k_warn("failed to read core_ctrl: %d\n", ret);
1569 		return ret;
1570 	}
1571 
1572 	/* A_INUM_FIRMWARE interrupt to Target CPU */
1573 	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1574 
1575 	ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1576 					       CORE_CTRL_ADDRESS,
1577 					   core_ctrl);
1578 	if (ret) {
1579 		ath10k_warn("failed to set target CPU interrupt mask: %d\n",
1580 			    ret);
1581 		return ret;
1582 	}
1583 
1584 	return 0;
1585 }
1586 
1587 static int ath10k_pci_init_config(struct ath10k *ar)
1588 {
1589 	u32 interconnect_targ_addr;
1590 	u32 pcie_state_targ_addr = 0;
1591 	u32 pipe_cfg_targ_addr = 0;
1592 	u32 svc_to_pipe_map = 0;
1593 	u32 pcie_config_flags = 0;
1594 	u32 ealloc_value;
1595 	u32 ealloc_targ_addr;
1596 	u32 flag2_value;
1597 	u32 flag2_targ_addr;
1598 	int ret = 0;
1599 
1600 	/* Download to Target the CE Config and the service-to-CE map */
1601 	interconnect_targ_addr =
1602 		host_interest_item_address(HI_ITEM(hi_interconnect_state));
1603 
1604 	/* Supply Target-side CE configuration */
1605 	ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1606 					  &pcie_state_targ_addr);
1607 	if (ret != 0) {
1608 		ath10k_err("Failed to get pcie state addr: %d\n", ret);
1609 		return ret;
1610 	}
1611 
1612 	if (pcie_state_targ_addr == 0) {
1613 		ret = -EIO;
1614 		ath10k_err("Invalid pcie state addr\n");
1615 		return ret;
1616 	}
1617 
1618 	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1619 					  offsetof(struct pcie_state,
1620 						   pipe_cfg_addr),
1621 					  &pipe_cfg_targ_addr);
1622 	if (ret != 0) {
1623 		ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1624 		return ret;
1625 	}
1626 
1627 	if (pipe_cfg_targ_addr == 0) {
1628 		ret = -EIO;
1629 		ath10k_err("Invalid pipe cfg addr\n");
1630 		return ret;
1631 	}
1632 
1633 	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1634 				 target_ce_config_wlan,
1635 				 sizeof(target_ce_config_wlan));
1636 
1637 	if (ret != 0) {
1638 		ath10k_err("Failed to write pipe cfg: %d\n", ret);
1639 		return ret;
1640 	}
1641 
1642 	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1643 					  offsetof(struct pcie_state,
1644 						   svc_to_pipe_map),
1645 					  &svc_to_pipe_map);
1646 	if (ret != 0) {
1647 		ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1648 		return ret;
1649 	}
1650 
1651 	if (svc_to_pipe_map == 0) {
1652 		ret = -EIO;
1653 		ath10k_err("Invalid svc_to_pipe map\n");
1654 		return ret;
1655 	}
1656 
1657 	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1658 				 target_service_to_ce_map_wlan,
1659 				 sizeof(target_service_to_ce_map_wlan));
1660 	if (ret != 0) {
1661 		ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1662 		return ret;
1663 	}
1664 
1665 	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1666 					  offsetof(struct pcie_state,
1667 						   config_flags),
1668 					  &pcie_config_flags);
1669 	if (ret != 0) {
1670 		ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1671 		return ret;
1672 	}
1673 
1674 	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1675 
1676 	ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1677 				 offsetof(struct pcie_state, config_flags),
1678 				 &pcie_config_flags,
1679 				 sizeof(pcie_config_flags));
1680 	if (ret != 0) {
1681 		ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1682 		return ret;
1683 	}
1684 
1685 	/* configure early allocation */
1686 	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1687 
1688 	ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1689 	if (ret != 0) {
1690 		ath10k_err("Faile to get early alloc val: %d\n", ret);
1691 		return ret;
1692 	}
1693 
1694 	/* first bank is switched to IRAM */
1695 	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1696 			 HI_EARLY_ALLOC_MAGIC_MASK);
1697 	ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1698 			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1699 
1700 	ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1701 	if (ret != 0) {
1702 		ath10k_err("Failed to set early alloc val: %d\n", ret);
1703 		return ret;
1704 	}
1705 
1706 	/* Tell Target to proceed with initialization */
1707 	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1708 
1709 	ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1710 	if (ret != 0) {
1711 		ath10k_err("Failed to get option val: %d\n", ret);
1712 		return ret;
1713 	}
1714 
1715 	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1716 
1717 	ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1718 	if (ret != 0) {
1719 		ath10k_err("Failed to set option val: %d\n", ret);
1720 		return ret;
1721 	}
1722 
1723 	return 0;
1724 }
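
/* Summary of the sequence above: the host publishes its CE and
 * service-to-pipe maps through the target's host_interest area, clears the
 * PCIe L1 config flag, switches one IRAM bank to early allocation, and
 * finally sets HI_OPTION_EARLY_CFG_DONE so the target firmware proceeds
 * with initialization. */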
1725 
1726 static int ath10k_pci_alloc_ce(struct ath10k *ar)
1727 {
1728 	int i, ret;
1729 
1730 	for (i = 0; i < CE_COUNT; i++) {
1731 		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
1732 		if (ret) {
1733 			ath10k_err("failed to allocate copy engine pipe %d: %d\n",
1734 				   i, ret);
1735 			return ret;
1736 		}
1737 	}
1738 
1739 	return 0;
1740 }
1741 
1742 static void ath10k_pci_free_ce(struct ath10k *ar)
1743 {
1744 	int i;
1745 
1746 	for (i = 0; i < CE_COUNT; i++)
1747 		ath10k_ce_free_pipe(ar, i);
1748 }
1749 
1750 static int ath10k_pci_ce_init(struct ath10k *ar)
1751 {
1752 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1753 	struct ath10k_pci_pipe *pipe_info;
1754 	const struct ce_attr *attr;
1755 	int pipe_num, ret;
1756 
1757 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1758 		pipe_info = &ar_pci->pipe_info[pipe_num];
1759 		pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
1760 		pipe_info->pipe_num = pipe_num;
1761 		pipe_info->hif_ce_state = ar;
1762 		attr = &host_ce_config_wlan[pipe_num];
1763 
1764 		ret = ath10k_ce_init_pipe(ar, pipe_num, attr);
1765 		if (ret) {
1766 			ath10k_err("failed to initialize copy engine pipe %d: %d\n",
1767 				   pipe_num, ret);
1768 			return ret;
1769 		}
1770 
1771 		if (pipe_num == CE_COUNT - 1) {
1772 			/*
1773 			 * Reserve the ultimate CE for
1774 			 * diagnostic Window support
1775 			 */
1776 			ar_pci->ce_diag = pipe_info->ce_hdl;
1777 			continue;
1778 		}
1779 
1780 		pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1781 	}
1782 
1783 	return 0;
1784 }
1785 
1786 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1787 {
1788 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1789 	u32 fw_indicator;
1790 
1791 	ath10k_pci_wake(ar);
1792 
1793 	fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
1794 
1795 	if (fw_indicator & FW_IND_EVENT_PENDING) {
1796 		/* ACK: clear Target-side pending event */
1797 		ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
1798 				   fw_indicator & ~FW_IND_EVENT_PENDING);
1799 
1800 		if (ar_pci->started) {
1801 			ath10k_pci_hif_dump_area(ar);
1802 		} else {
1803 			/*
1804 			 * Probable Target failure before we're prepared
1805 			 * to handle it.  Generally unexpected.
1806 			 */
1807 			ath10k_warn("early firmware event indicated\n");
1808 		}
1809 	}
1810 
1811 	ath10k_pci_sleep(ar);
1812 }
1813 
1814 /* this function effectively clears target memory controller assert line */
1815 static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
1816 {
1817 	u32 val;
1818 
1819 	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1820 	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1821 			       val | SOC_RESET_CONTROL_SI0_RST_MASK);
1822 	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1823 
1824 	msleep(10);
1825 
1826 	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1827 	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1828 			       val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
1829 	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1830 
1831 	msleep(10);
1832 }
1833 
1834 static int ath10k_pci_warm_reset(struct ath10k *ar)
1835 {
1836 	int ret = 0;
1837 	u32 val;
1838 
1839 	ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n");
1840 
1841 	ret = ath10k_do_pci_wake(ar);
1842 	if (ret) {
1843 		ath10k_err("failed to wake up target: %d\n", ret);
1844 		return ret;
1845 	}
1846 
1847 	/* debug */
1848 	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1849 				PCIE_INTR_CAUSE_ADDRESS);
1850 	ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
1851 
1852 	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1853 				CPU_INTR_ADDRESS);
1854 	ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
1855 		   val);
1856 
1857 	/* disable pending irqs */
1858 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1859 			   PCIE_INTR_ENABLE_ADDRESS, 0);
1860 
1861 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1862 			   PCIE_INTR_CLR_ADDRESS, ~0);
1863 
1864 	msleep(100);
1865 
1866 	/* clear fw indicator */
1867 	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
1868 
1869 	/* clear target LF timer interrupts */
1870 	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1871 				SOC_LF_TIMER_CONTROL0_ADDRESS);
1872 	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
1873 			   SOC_LF_TIMER_CONTROL0_ADDRESS,
1874 			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
1875 
1876 	/* reset CE */
1877 	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1878 				SOC_RESET_CONTROL_ADDRESS);
1879 	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1880 			   val | SOC_RESET_CONTROL_CE_RST_MASK);
1881 	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1882 				SOC_RESET_CONTROL_ADDRESS);
1883 	msleep(10);
1884 
1885 	/* unreset CE */
1886 	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1887 			   val & ~SOC_RESET_CONTROL_CE_RST_MASK);
1888 	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1889 				SOC_RESET_CONTROL_ADDRESS);
1890 	msleep(10);
1891 
1892 	ath10k_pci_warm_reset_si0(ar);
1893 
1894 	/* debug */
1895 	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1896 				PCIE_INTR_CAUSE_ADDRESS);
1897 	ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
1898 
1899 	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1900 				CPU_INTR_ADDRESS);
1901 	ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
1902 		   val);
1903 
1904 	/* CPU warm reset */
1905 	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1906 				SOC_RESET_CONTROL_ADDRESS);
1907 	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1908 			   val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
1909 
1910 	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1911 				SOC_RESET_CONTROL_ADDRESS);
1912 	ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);
1913 
1914 	msleep(100);
1915 
1916 	ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");
1917 
1918 	ath10k_do_pci_sleep(ar);
1919 	return ret;
1920 }
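
/* The warm reset above proceeds as: mask and ack all PCIe interrupts,
 * clear the FW indicator and the target LF timer, pulse the CE reset bit,
 * toggle SI0 reset (clearing the memory controller assert), and finally
 * assert the CPU warm reset, with settling delays between the steps. */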
1921 
1922 static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
1923 {
1924 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1925 	const char *irq_mode;
1926 	int ret;
1927 
1928 	/*
1929 	 * Bring the target up cleanly.
1930 	 *
1931 	 * The target may be in an undefined state with an AUX-powered Target
1932 	 * and a Host in WoW mode. If the Host crashes, loses power, or is
1933 	 * restarted (without unloading the driver) then the Target is left
1934 	 * (aux) powered and running. On a subsequent driver load, the Target
1935 	 * is in an unexpected state. We try to catch that here in order to
1936 	 * reset the Target and retry the probe.
1937 	 */
1938 	if (cold_reset)
1939 		ret = ath10k_pci_cold_reset(ar);
1940 	else
1941 		ret = ath10k_pci_warm_reset(ar);
1942 
1943 	if (ret) {
1944 		ath10k_err("failed to reset target: %d\n", ret);
1945 		goto err;
1946 	}
1947 
1948 	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1949 		/* Force AWAKE forever */
1950 		ath10k_do_pci_wake(ar);
1951 
1952 	ret = ath10k_pci_ce_init(ar);
1953 	if (ret) {
1954 		ath10k_err("failed to initialize CE: %d\n", ret);
1955 		goto err_ps;
1956 	}
1957 
1958 	ret = ath10k_ce_disable_interrupts(ar);
1959 	if (ret) {
1960 		ath10k_err("failed to disable CE interrupts: %d\n", ret);
1961 		goto err_ce;
1962 	}
1963 
1964 	ret = ath10k_pci_init_irq(ar);
1965 	if (ret) {
1966 		ath10k_err("failed to init irqs: %d\n", ret);
1967 		goto err_ce;
1968 	}
1969 
1970 	ret = ath10k_pci_request_early_irq(ar);
1971 	if (ret) {
1972 		ath10k_err("failed to request early irq: %d\n", ret);
1973 		goto err_deinit_irq;
1974 	}
1975 
1976 	ret = ath10k_pci_wait_for_target_init(ar);
1977 	if (ret) {
1978 		ath10k_err("failed to wait for target to init: %d\n", ret);
1979 		goto err_free_early_irq;
1980 	}
1981 
1982 	ret = ath10k_pci_init_config(ar);
1983 	if (ret) {
1984 		ath10k_err("failed to setup init config: %d\n", ret);
1985 		goto err_free_early_irq;
1986 	}
1987 
1988 	ret = ath10k_pci_wake_target_cpu(ar);
1989 	if (ret) {
1990 		ath10k_err("could not wake up target CPU: %d\n", ret);
1991 		goto err_free_early_irq;
1992 	}
1993 
1994 	if (ar_pci->num_msi_intrs > 1)
1995 		irq_mode = "MSI-X";
1996 	else if (ar_pci->num_msi_intrs == 1)
1997 		irq_mode = "MSI";
1998 	else
1999 		irq_mode = "legacy";
2000 
2001 	if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
2002 		ath10k_info("pci irq %s irq_mode %d reset_mode %d\n",
2003 			    irq_mode, ath10k_pci_irq_mode,
2004 			    ath10k_pci_reset_mode);
2005 
2006 	return 0;
2007 
2008 err_free_early_irq:
2009 	ath10k_pci_free_early_irq(ar);
2010 err_deinit_irq:
2011 	ath10k_pci_deinit_irq(ar);
2012 err_ce:
2013 	ath10k_pci_ce_deinit(ar);
2014 	ath10k_pci_warm_reset(ar);
2015 err_ps:
2016 	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
2017 		ath10k_do_pci_sleep(ar);
2018 err:
2019 	return ret;
2020 }
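
/*
 * A minimal sketch (not part of the driver) of the goto-based unwind
 * idiom used by __ath10k_pci_hif_power_up(): every failure jumps to a
 * label that tears down only what was already set up, in reverse
 * acquisition order. All example_* symbols are hypothetical.
 */
extern int example_acquire_a(void);
extern int example_acquire_b(void);
extern void example_release_a(void);

static int example_power_up(void)
{
	int ret;

	ret = example_acquire_a();
	if (ret)
		goto err;		/* nothing to undo yet */

	ret = example_acquire_b();
	if (ret)
		goto err_release_a;	/* undo A only */

	return 0;

err_release_a:
	example_release_a();
err:
	return ret;
}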
2021 
2022 static int ath10k_pci_hif_power_up_warm(struct ath10k *ar)
2023 {
2024 	int i, ret;
2025 
2026 	/*
2027 	 * Sometimes a warm reset succeeds only after retries.
2028 	 *
2029 	 * FIXME: It might be possible to tune ath10k_pci_warm_reset() to work
2030 	 * on the first try.
2031 	 */
2032 	for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
2033 		ret = __ath10k_pci_hif_power_up(ar, false);
2034 		if (ret == 0)
2035 			break;
2036 
2037 		ath10k_warn("failed to warm reset (attempt %d out of %d): %d\n",
2038 			    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret);
2039 	}
2040 
2041 	return ret;
2042 }
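
/*
 * A minimal sketch (not part of the driver) of the bounded-retry
 * pattern above: attempt a flaky operation a fixed number of times
 * and hand the last error back to the caller. example_try_once() is
 * hypothetical.
 */
extern int example_try_once(void);

static int example_retry(int max_attempts)
{
	int i, ret = -EAGAIN;

	for (i = 0; i < max_attempts; i++) {
		ret = example_try_once();
		if (ret == 0)
			break;		/* success, stop retrying */
	}

	return ret;			/* 0 or the last failure code */
}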
2043 
2044 static int ath10k_pci_hif_power_up(struct ath10k *ar)
2045 {
2046 	int ret;
2047 
2048 	ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n");
2049 
2050 	/*
2051 	 * Hardware CUS232 version 2 has some issues with cold reset and the
2052 	 * preferred (and safer) way to perform a device reset is through a
2053 	 * warm reset.
2054 	 *
2055 	 * Warm reset doesn't always work though, so falling back to cold
2056 	 * reset may be necessary.
2057 	 */
2058 	ret = ath10k_pci_hif_power_up_warm(ar);
2059 	if (ret) {
2060 		ath10k_warn("failed to power up target using warm reset: %d\n",
2061 			    ret);
2062 
2063 		if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
2064 			return ret;
2065 
2066 		ath10k_warn("trying cold reset\n");
2067 
2068 		ret = __ath10k_pci_hif_power_up(ar, true);
2069 		if (ret) {
2070 			ath10k_err("failed to power up target using cold reset too (%d)\n",
2071 				   ret);
2072 			return ret;
2073 		}
2074 	}
2075 
2076 	return 0;
2077 }
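
/*
 * A minimal sketch (not part of the driver) of the warm-then-cold
 * policy implemented above: prefer the safer reset and escalate to
 * the riskier one only when the reset_mode policy allows it. The
 * example_* helpers are hypothetical.
 */
extern int example_warm_reset(void);
extern int example_cold_reset(void);

static int example_reset_with_fallback(bool warm_only)
{
	int ret;

	ret = example_warm_reset();
	if (ret == 0)
		return 0;

	if (warm_only)
		return ret;		/* policy forbids escalation */

	return example_cold_reset();	/* last resort */
}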
2078 
2079 static void ath10k_pci_hif_power_down(struct ath10k *ar)
2080 {
2081 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2082 
2083 	ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n");
2084 
2085 	ath10k_pci_free_early_irq(ar);
2086 	ath10k_pci_kill_tasklet(ar);
2087 	ath10k_pci_deinit_irq(ar);
2088 	ath10k_pci_ce_deinit(ar);
2089 	ath10k_pci_warm_reset(ar);
2090 
2091 	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
2092 		ath10k_do_pci_sleep(ar);
2093 }
2094 
2095 #ifdef CONFIG_PM
2096 
2097 #define ATH10K_PCI_PM_CONTROL 0x44
2098 
2099 static int ath10k_pci_hif_suspend(struct ath10k *ar)
2100 {
2101 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2102 	struct pci_dev *pdev = ar_pci->pdev;
2103 	u32 val;
2104 
2105 	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2106 
2107 	if ((val & 0x000000ff) != 0x3) {
2108 		pci_save_state(pdev);
2109 		pci_disable_device(pdev);
2110 		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2111 				       (val & 0xffffff00) | 0x03);
2112 	}
2113 
2114 	return 0;
2115 }
2116 
2117 static int ath10k_pci_hif_resume(struct ath10k *ar)
2118 {
2119 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2120 	struct pci_dev *pdev = ar_pci->pdev;
2121 	u32 val;
2122 
2123 	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2124 
2125 	if ((val & 0x000000ff) != 0) {
2126 		pci_restore_state(pdev);
2127 		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2128 				       val & 0xffffff00);
2129 		/*
2130 		 * Suspend/Resume resets the PCI configuration space,
2131 		 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
2132 		 * to keep PCI Tx retries from interfering with C3 CPU state
2133 		 */
2134 		pci_read_config_dword(pdev, 0x40, &val);
2135 
2136 		if ((val & 0x0000ff00) != 0)
2137 			pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2138 	}
2139 
2140 	return 0;
2141 }
2142 #endif
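
/*
 * A minimal sketch (not part of the driver) of the PMCSR handling in
 * the suspend hook above: the two low bits of the PM control/status
 * word hold the device power state (0 = D0, 3 = D3hot), so suspend
 * rewrites only that byte. EXAMPLE_PM_CONTROL mirrors the 0x44 offset
 * assumed above and is hypothetical as a general-purpose constant.
 */
#define EXAMPLE_PM_CONTROL 0x44

static void example_enter_d3hot(struct pci_dev *pdev)
{
	u32 val;

	pci_read_config_dword(pdev, EXAMPLE_PM_CONTROL, &val);
	if ((val & 0xff) != 0x3)	/* not in D3hot yet */
		pci_write_config_dword(pdev, EXAMPLE_PM_CONTROL,
				       (val & 0xffffff00) | 0x03);
}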
2143 
2144 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2145 	.tx_sg			= ath10k_pci_hif_tx_sg,
2146 	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
2147 	.start			= ath10k_pci_hif_start,
2148 	.stop			= ath10k_pci_hif_stop,
2149 	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
2150 	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
2151 	.send_complete_check	= ath10k_pci_hif_send_complete_check,
2152 	.set_callbacks		= ath10k_pci_hif_set_callbacks,
2153 	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
2154 	.power_up		= ath10k_pci_hif_power_up,
2155 	.power_down		= ath10k_pci_hif_power_down,
2156 #ifdef CONFIG_PM
2157 	.suspend		= ath10k_pci_hif_suspend,
2158 	.resume			= ath10k_pci_hif_resume,
2159 #endif
2160 };
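
/*
 * A minimal sketch (not part of the driver) of the ops-table
 * indirection behind ath10k_hif_ops: the core dispatches through a
 * struct of function pointers so bus backends (PCI here) can be
 * swapped without touching core code. The example_* types are
 * hypothetical stand-ins.
 */
struct example_bus_ops {
	int (*power_up)(struct ath10k *ar);
	void (*power_down)(struct ath10k *ar);
};

static int example_core_boot(struct ath10k *ar,
			     const struct example_bus_ops *ops)
{
	return ops->power_up(ar);	/* dispatch to the active backend */
}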
2161 
2162 static void ath10k_pci_ce_tasklet(unsigned long ptr)
2163 {
2164 	struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
2165 	struct ath10k_pci *ar_pci = pipe->ar_pci;
2166 
2167 	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2168 }
2169 
2170 static void ath10k_msi_err_tasklet(unsigned long data)
2171 {
2172 	struct ath10k *ar = (struct ath10k *)data;
2173 
2174 	ath10k_pci_fw_interrupt_handler(ar);
2175 }
2176 
2177 /*
2178  * Handler for a per-engine interrupt on a PARTICULAR CE.
2179  * This is used in cases where each CE has a private MSI interrupt.
2180  */
2181 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2182 {
2183 	struct ath10k *ar = arg;
2184 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2185 	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2186 
2187 	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
2188 		ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
2189 		return IRQ_HANDLED;
2190 	}
2191 
2192 	/*
2193 	 * NOTE: We are able to derive ce_id from irq because we
2194 	 * use a one-to-one mapping for CEs 0..5.
2195 	 * CEs 6 & 7 do not use interrupts at all.
2196 	 *
2197 	 * This mapping must be kept in sync with the mapping
2198 	 * used by firmware.
2199 	 */
2200 	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2201 	return IRQ_HANDLED;
2202 }
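
/*
 * A minimal sketch (not part of the driver) of the irq -> ce_id
 * arithmetic used above: with a contiguous MSI block, vector
 * (base + first_ce_vector + n) services CE n, so the id falls out by
 * subtraction; out-of-range ids must be rejected. example_ce_id() is
 * hypothetical.
 */
static int example_ce_id(int irq, int base_irq, int first_ce_vector,
			 int num_ce)
{
	int ce_id = irq - base_irq - first_ce_vector;

	return (ce_id >= 0 && ce_id < num_ce) ? ce_id : -1;
}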
2203 
2204 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2205 {
2206 	struct ath10k *ar = arg;
2207 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2208 
2209 	tasklet_schedule(&ar_pci->msi_fw_err);
2210 	return IRQ_HANDLED;
2211 }
2212 
2213 /*
2214  * Top-level interrupt handler for all PCI interrupts from a Target.
2215  * When a block of MSI interrupts is allocated, this top-level handler
2216  * is not used; instead, we directly call the correct sub-handler.
2217  */
2218 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2219 {
2220 	struct ath10k *ar = arg;
2221 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2222 
2223 	if (ar_pci->num_msi_intrs == 0) {
2224 		if (!ath10k_pci_irq_pending(ar))
2225 			return IRQ_NONE;
2226 
2227 		ath10k_pci_disable_and_clear_legacy_irq(ar);
2228 	}
2229 
2230 	tasklet_schedule(&ar_pci->intr_tq);
2231 
2232 	return IRQ_HANDLED;
2233 }
2234 
2235 static void ath10k_pci_early_irq_tasklet(unsigned long data)
2236 {
2237 	struct ath10k *ar = (struct ath10k *)data;
2238 	u32 fw_ind;
2239 	int ret;
2240 
2241 	ret = ath10k_pci_wake(ar);
2242 	if (ret) {
2243 		ath10k_warn("failed to wake target in early irq tasklet: %d\n",
2244 			    ret);
2245 		return;
2246 	}
2247 
2248 	fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2249 	if (fw_ind & FW_IND_EVENT_PENDING) {
2250 		ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
2251 				   fw_ind & ~FW_IND_EVENT_PENDING);
2252 		ath10k_pci_hif_dump_area(ar);
2253 	}
2254 
2255 	ath10k_pci_sleep(ar);
2256 	ath10k_pci_enable_legacy_irq(ar);
2257 }
2258 
2259 static void ath10k_pci_tasklet(unsigned long data)
2260 {
2261 	struct ath10k *ar = (struct ath10k *)data;
2262 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2263 
2264 	ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2265 	ath10k_ce_per_engine_service_any(ar);
2266 
2267 	/* Re-enable legacy irq that was disabled in the irq handler */
2268 	if (ar_pci->num_msi_intrs == 0)
2269 		ath10k_pci_enable_legacy_irq(ar);
2270 }
2271 
2272 static int ath10k_pci_request_irq_msix(struct ath10k *ar)
2273 {
2274 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2275 	int ret, i;
2276 
2277 	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2278 			  ath10k_pci_msi_fw_handler,
2279 			  IRQF_SHARED, "ath10k_pci", ar);
2280 	if (ret) {
2281 		ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
2282 			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2283 		return ret;
2284 	}
2285 
2286 	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2287 		ret = request_irq(ar_pci->pdev->irq + i,
2288 				  ath10k_pci_per_engine_handler,
2289 				  IRQF_SHARED, "ath10k_pci", ar);
2290 		if (ret) {
2291 			ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
2292 				    ar_pci->pdev->irq + i, ret);
2293 
2294 			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2295 				free_irq(ar_pci->pdev->irq + i, ar);
2296 
2297 			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2298 			return ret;
2299 		}
2300 	}
2301 
2302 	return 0;
2303 }
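
/*
 * A minimal sketch (not part of the driver) of the rollback idiom in
 * ath10k_pci_request_irq_msix(): when the Nth request_irq() fails,
 * walk back down and free only the vectors that were actually
 * granted. example_request_vectors() is hypothetical.
 */
static int example_request_vectors(unsigned int base, int count,
				   irq_handler_t handler, void *dev)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = request_irq(base + i, handler, IRQF_SHARED,
				  "example", dev);
		if (ret) {
			while (--i >= 0)	/* undo earlier requests */
				free_irq(base + i, dev);
			return ret;
		}
	}

	return 0;
}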
2304 
2305 static int ath10k_pci_request_irq_msi(struct ath10k *ar)
2306 {
2307 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2308 	int ret;
2309 
2310 	ret = request_irq(ar_pci->pdev->irq,
2311 			  ath10k_pci_interrupt_handler,
2312 			  IRQF_SHARED, "ath10k_pci", ar);
2313 	if (ret) {
2314 		ath10k_warn("failed to request MSI irq %d: %d\n",
2315 			    ar_pci->pdev->irq, ret);
2316 		return ret;
2317 	}
2318 
2319 	return 0;
2320 }
2321 
2322 static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
2323 {
2324 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2325 	int ret;
2326 
2327 	ret = request_irq(ar_pci->pdev->irq,
2328 			  ath10k_pci_interrupt_handler,
2329 			  IRQF_SHARED, "ath10k_pci", ar);
2330 	if (ret) {
2331 		ath10k_warn("failed to request legacy irq %d: %d\n",
2332 			    ar_pci->pdev->irq, ret);
2333 		return ret;
2334 	}
2335 
2336 	return 0;
2337 }
2338 
2339 static int ath10k_pci_request_irq(struct ath10k *ar)
2340 {
2341 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2342 
2343 	switch (ar_pci->num_msi_intrs) {
2344 	case 0:
2345 		return ath10k_pci_request_irq_legacy(ar);
2346 	case 1:
2347 		return ath10k_pci_request_irq_msi(ar);
2348 	case MSI_NUM_REQUEST:
2349 		return ath10k_pci_request_irq_msix(ar);
2350 	}
2351 
2352 	ath10k_warn("unknown irq configuration upon request\n");
2353 	return -EINVAL;
2354 }
2355 
2356 static void ath10k_pci_free_irq(struct ath10k *ar)
2357 {
2358 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2359 	int i;
2360 
2361 	/* There's at least one interrupt regardless of whether it's legacy
2362 	 * INTR, MSI or MSI-X */
2363 	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2364 		free_irq(ar_pci->pdev->irq + i, ar);
2365 }
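
/*
 * A minimal sketch (not part of the driver) of the "at least one
 * vector" rule above: legacy INTx still occupies pdev->irq, so the
 * free loop must run max(1, nvec) times whichever mode was active.
 * example_free_vectors() is hypothetical.
 */
static void example_free_vectors(unsigned int base, int nvec, void *dev)
{
	int i;

	for (i = 0; i < max(1, nvec); i++)
		free_irq(base + i, dev);
}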
2366 
2367 static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2368 {
2369 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2370 	int i;
2371 
2372 	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2373 	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2374 		     (unsigned long)ar);
2375 	tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
2376 		     (unsigned long)ar);
2377 
2378 	for (i = 0; i < CE_COUNT; i++) {
2379 		ar_pci->pipe_info[i].ar_pci = ar_pci;
2380 		tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2381 			     (unsigned long)&ar_pci->pipe_info[i]);
2382 	}
2383 }
2384 
2385 static int ath10k_pci_init_irq(struct ath10k *ar)
2386 {
2387 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2388 	bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
2389 				       ar_pci->features);
2390 	int ret;
2391 
2392 	ath10k_pci_init_irq_tasklets(ar);
2393 
2394 	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
2395 	    !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
2396 		ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);
2397 
2398 	/* Try MSI-X */
2399 	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
2400 		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
2401 		ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
2402 					   ar_pci->num_msi_intrs);
2403 		if (ret > 0)
2404 			return 0;
2405 
2406 		/* fall-through */
2407 	}
2408 
2409 	/* Try MSI */
2410 	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2411 		ar_pci->num_msi_intrs = 1;
2412 		ret = pci_enable_msi(ar_pci->pdev);
2413 		if (ret == 0)
2414 			return 0;
2415 
2416 		/* fall-through */
2417 	}
2418 
2419 	/* Try legacy irq
2420 	 *
2421 	 * A potential race occurs here: the CORE_BASE write
2422 	 * depends on the target correctly decoding the AXI address, but
2423 	 * the host won't know when the target has written its BAR to
2424 	 * CORE_CTRL. This write might get lost if the target has NOT
2425 	 * written the BAR yet. For now, fix the race by repeating the
2426 	 * write in the synchronization check below. */
2427 	ar_pci->num_msi_intrs = 0;
2428 
2429 	ret = ath10k_pci_wake(ar);
2430 	if (ret) {
2431 		ath10k_warn("failed to wake target: %d\n", ret);
2432 		return ret;
2433 	}
2434 
2435 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2436 			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2437 	ath10k_pci_sleep(ar);
2438 
2439 	return 0;
2440 }
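
/*
 * A minimal sketch (not part of the driver) of the interrupt-mode
 * selection ladder in ath10k_pci_init_irq(): try the richest mode
 * first and report how many vectors were obtained; zero means legacy
 * line interrupts. example_pick_irq_mode() is hypothetical.
 */
static int example_pick_irq_mode(struct pci_dev *pdev, int want)
{
	if (pci_enable_msi_range(pdev, want, want) > 0)
		return want;		/* full per-CE MSI block */

	if (pci_enable_msi(pdev) == 0)
		return 1;		/* single shared MSI */

	return 0;			/* fall back to legacy INTx */
}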
2441 
2442 static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2443 {
2444 	int ret;
2445 
2446 	ret = ath10k_pci_wake(ar);
2447 	if (ret) {
2448 		ath10k_warn("failed to wake target: %d\n", ret);
2449 		return ret;
2450 	}
2451 
2452 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2453 			   0);
2454 	ath10k_pci_sleep(ar);
2455 
2456 	return 0;
2457 }
2458 
2459 static int ath10k_pci_deinit_irq(struct ath10k *ar)
2460 {
2461 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2462 
2463 	switch (ar_pci->num_msi_intrs) {
2464 	case 0:
2465 		return ath10k_pci_deinit_irq_legacy(ar);
2466 	case 1:
2467 		/* fall-through */
2468 	case MSI_NUM_REQUEST:
2469 		pci_disable_msi(ar_pci->pdev);
2470 		return 0;
2471 	default:
2472 		pci_disable_msi(ar_pci->pdev);
2473 	}
2474 
2475 	ath10k_warn("unknown irq configuration upon deinit\n");
2476 	return -EINVAL;
2477 }
2478 
2479 static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2480 {
2481 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2482 	unsigned long timeout;
2483 	int ret;
2484 	u32 val;
2485 
2486 	ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting for target to initialise\n");
2487 
2488 	ret = ath10k_pci_wake(ar);
2489 	if (ret) {
2490 		ath10k_err("failed to wake up target for init: %d\n", ret);
2491 		return ret;
2492 	}
2493 
2494 	timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2495 
2496 	do {
2497 		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2498 
2499 		ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);
2500 
2501 		/* all-ones means the read failed; the target never returns this */
2502 		if (val == 0xffffffff)
2503 			continue;
2504 
2505 		/* the device has crashed so don't bother trying anymore */
2506 		if (val & FW_IND_EVENT_PENDING)
2507 			break;
2508 
2509 		if (val & FW_IND_INITIALIZED)
2510 			break;
2511 
2512 		if (ar_pci->num_msi_intrs == 0)
2513 			/* Fix potential race by repeating CORE_BASE writes */
2514 			ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
2515 					       PCIE_INTR_FIRMWARE_MASK |
2516 					       PCIE_INTR_CE_MASK_ALL);
2517 
2518 		mdelay(10);
2519 	} while (time_before(jiffies, timeout));
2520 
2521 	if (val == 0xffffffff) {
2522 		ath10k_err("failed to read device register, device is gone\n");
2523 		ret = -EIO;
2524 		goto out;
2525 	}
2526 
2527 	if (val & FW_IND_EVENT_PENDING) {
2528 		ath10k_warn("device has crashed during init\n");
2529 		ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
2530 				   val & ~FW_IND_EVENT_PENDING);
2531 		ath10k_pci_hif_dump_area(ar);
2532 		ret = -ECOMM;
2533 		goto out;
2534 	}
2535 
2536 	if (!(val & FW_IND_INITIALIZED)) {
2537 		ath10k_err("failed to receive initialized event from target: %08x\n",
2538 			   val);
2539 		ret = -ETIMEDOUT;
2540 		goto out;
2541 	}
2542 
2543 	ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");
2544 
2545 out:
2546 	ath10k_pci_sleep(ar);
2547 	return ret;
2548 }
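
/*
 * A minimal sketch (not part of the driver) of the jiffies-based
 * polling loop in ath10k_pci_wait_for_target_init(): compute the
 * deadline once, poll with a small delay, and distinguish success
 * from timeout. example_ready() is hypothetical.
 */
extern bool example_ready(void);

static int example_poll_ready(unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		if (example_ready())
			return 0;
		mdelay(10);		/* same coarse delay as above */
	} while (time_before(jiffies, timeout));

	return -ETIMEDOUT;
}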
2549 
2550 static int ath10k_pci_cold_reset(struct ath10k *ar)
2551 {
2552 	int i, ret;
2553 	u32 val;
2554 
2555 	ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");
2556 
2557 	ret = ath10k_do_pci_wake(ar);
2558 	if (ret) {
2559 		ath10k_err("failed to wake up target: %d\n",
2560 			   ret);
2561 		return ret;
2562 	}
2563 
2564 	/* Put Target, including PCIe, into RESET. */
2565 	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2566 	val |= 1;
2567 	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2568 
2569 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2570 		if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2571 					  RTC_STATE_COLD_RESET_MASK)
2572 			break;
2573 		msleep(1);
2574 	}
2575 
2576 	/* Pull Target, including PCIe, out of RESET. */
2577 	val &= ~1;
2578 	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2579 
2580 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2581 		if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2582 					    RTC_STATE_COLD_RESET_MASK))
2583 			break;
2584 		msleep(1);
2585 	}
2586 
2587 	ath10k_do_pci_sleep(ar);
2588 
2589 	ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");
2590 
2591 	return 0;
2592 }
2593 
2594 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2595 {
2596 	int i;
2597 
2598 	for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2599 		if (!test_bit(i, ar_pci->features))
2600 			continue;
2601 
2602 		switch (i) {
2603 		case ATH10K_PCI_FEATURE_MSI_X:
2604 			ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
2605 			break;
2606 		case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
2607 			ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
2608 			break;
2609 		}
2610 	}
2611 }
2612 
2613 static int ath10k_pci_probe(struct pci_dev *pdev,
2614 			    const struct pci_device_id *pci_dev)
2615 {
2616 	void __iomem *mem;
2617 	int ret = 0;
2618 	struct ath10k *ar;
2619 	struct ath10k_pci *ar_pci;
2620 	u32 lcr_val, chip_id;
2621 
2622 	ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");
2623 
2624 	ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2625 	if (ar_pci == NULL)
2626 		return -ENOMEM;
2627 
2628 	ar_pci->pdev = pdev;
2629 	ar_pci->dev = &pdev->dev;
2630 
2631 	switch (pci_dev->device) {
2632 	case QCA988X_2_0_DEVICE_ID:
2633 		set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2634 		break;
2635 	default:
2636 		ret = -ENODEV;
2637 		ath10k_err("unknown device ID: 0x%04x\n", pci_dev->device);
2638 		goto err_ar_pci;
2639 	}
2640 
2641 	if (ath10k_pci_target_ps)
2642 		set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2643 
2644 	ath10k_pci_dump_features(ar_pci);
2645 
2646 	ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
2647 	if (!ar) {
2648 		ath10k_err("failed to create driver core\n");
2649 		ret = -EINVAL;
2650 		goto err_ar_pci;
2651 	}
2652 
2653 	ar_pci->ar = ar;
2654 	atomic_set(&ar_pci->keep_awake_count, 0);
2655 
2656 	pci_set_drvdata(pdev, ar);
2657 
2658 	ret = pci_enable_device(pdev);
2659 	if (ret) {
2660 		ath10k_err("failed to enable PCI device: %d\n", ret);
2661 		goto err_ar;
2662 	}
2663 
2664 	/* Request MMIO resources */
2665 	ret = pci_request_region(pdev, BAR_NUM, "ath");
2666 	if (ret) {
2667 		ath10k_err("failed to request MMIO region: %d\n", ret);
2668 		goto err_device;
2669 	}
2670 
2671 	/*
2672 	 * Target structures have a limit of 32 bit DMA pointers.
2673 	 * DMA pointers can be wider than 32 bits by default on some systems.
2674 	 */
2675 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2676 	if (ret) {
2677 		ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
2678 		goto err_region;
2679 	}
2680 
2681 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2682 	if (ret) {
2683 		ath10k_err("failed to set consistent DMA mask to 32-bit\n");
2684 		goto err_region;
2685 	}
2686 
2687 	/* Set bus master bit in PCI_COMMAND to enable DMA */
2688 	pci_set_master(pdev);
2689 
2690 	/*
2691 	 * Temporary FIX: disable ASPM
2692 	 * Will be removed after the OTP is programmed
2693 	 */
2694 	pci_read_config_dword(pdev, 0x80, &lcr_val);
2695 	pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2696 
2697 	/* Arrange for access to Target SoC registers. */
2698 	mem = pci_iomap(pdev, BAR_NUM, 0);
2699 	if (!mem) {
2700 		ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
2701 		ret = -EIO;
2702 		goto err_master;
2703 	}
2704 
2705 	ar_pci->mem = mem;
2706 
2707 	spin_lock_init(&ar_pci->ce_lock);
2708 
2709 	ret = ath10k_do_pci_wake(ar);
2710 	if (ret) {
2711 		ath10k_err("Failed to get chip id: %d\n", ret);
2712 		goto err_iomap;
2713 	}
2714 
2715 	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
2716 
2717 	ath10k_do_pci_sleep(ar);
2718 
2719 	ret = ath10k_pci_alloc_ce(ar);
2720 	if (ret) {
2721 		ath10k_err("failed to allocate copy engine pipes: %d\n", ret);
2722 		goto err_iomap;
2723 	}
2724 
2725 	ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2726 
2727 	ret = ath10k_core_register(ar, chip_id);
2728 	if (ret) {
2729 		ath10k_err("failed to register driver core: %d\n", ret);
2730 		goto err_free_ce;
2731 	}
2732 
2733 	return 0;
2734 
2735 err_free_ce:
2736 	ath10k_pci_free_ce(ar);
2737 err_iomap:
2738 	pci_iounmap(pdev, mem);
2739 err_master:
2740 	pci_clear_master(pdev);
2741 err_region:
2742 	pci_release_region(pdev, BAR_NUM);
2743 err_device:
2744 	pci_disable_device(pdev);
2745 err_ar:
2746 	ath10k_core_destroy(ar);
2747 err_ar_pci:
2748 	/* call HIF PCI free here */
2749 	kfree(ar_pci);
2750 
2751 	return ret;
2752 }
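
/*
 * A minimal sketch (not part of the driver) of the DMA-mask
 * negotiation done in probe above: the target only understands 32-bit
 * DMA pointers, so both the streaming and the coherent masks are
 * capped before any mapping is made. example_set_dma32() is
 * hypothetical.
 */
static int example_set_dma32(struct pci_dev *pdev)
{
	int ret;

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
}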
2753 
2754 static void ath10k_pci_remove(struct pci_dev *pdev)
2755 {
2756 	struct ath10k *ar = pci_get_drvdata(pdev);
2757 	struct ath10k_pci *ar_pci;
2758 
2759 	ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");
2760 
2761 	if (!ar)
2762 		return;
2763 
2764 	ar_pci = ath10k_pci_priv(ar);
2765 
2766 	if (!ar_pci)
2767 		return;
2768 
2769 	ath10k_core_unregister(ar);
2770 	ath10k_pci_free_ce(ar);
2771 
2772 	pci_iounmap(pdev, ar_pci->mem);
2773 	pci_release_region(pdev, BAR_NUM);
2774 	pci_clear_master(pdev);
2775 	pci_disable_device(pdev);
2776 
2777 	ath10k_core_destroy(ar);
2778 	kfree(ar_pci);
2779 }
2780 
2781 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2782 
2783 static struct pci_driver ath10k_pci_driver = {
2784 	.name = "ath10k_pci",
2785 	.id_table = ath10k_pci_id_table,
2786 	.probe = ath10k_pci_probe,
2787 	.remove = ath10k_pci_remove,
2788 };
2789 
2790 static int __init ath10k_pci_init(void)
2791 {
2792 	int ret;
2793 
2794 	ret = pci_register_driver(&ath10k_pci_driver);
2795 	if (ret)
2796 		ath10k_err("failed to register PCI driver: %d\n", ret);
2797 
2798 	return ret;
2799 }
2800 module_init(ath10k_pci_init);
2801 
2802 static void __exit ath10k_pci_exit(void)
2803 {
2804 	pci_unregister_driver(&ath10k_pci_driver);
2805 }
2806 
2807 module_exit(ath10k_pci_exit);
2808 
2809 MODULE_AUTHOR("Qualcomm Atheros");
2810 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2811 MODULE_LICENSE("Dual BSD/GPL");
2812 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);
2813 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
2814