xref: /openbmc/linux/drivers/net/wireless/ath/ath10k/pci.c (revision d2999e1b)
1 /*
2  * Copyright (c) 2005-2011 Atheros Communications Inc.
3  * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <linux/pci.h>
19 #include <linux/module.h>
20 #include <linux/interrupt.h>
21 #include <linux/spinlock.h>
22 #include <linux/bitops.h>
23 
24 #include "core.h"
25 #include "debug.h"
26 
27 #include "targaddrs.h"
28 #include "bmi.h"
29 
30 #include "hif.h"
31 #include "htc.h"
32 
33 #include "ce.h"
34 #include "pci.h"
35 
36 enum ath10k_pci_irq_mode {
37 	ATH10K_PCI_IRQ_AUTO = 0,
38 	ATH10K_PCI_IRQ_LEGACY = 1,
39 	ATH10K_PCI_IRQ_MSI = 2,
40 };
41 
42 enum ath10k_pci_reset_mode {
43 	ATH10K_PCI_RESET_AUTO = 0,
44 	ATH10K_PCI_RESET_WARM_ONLY = 1,
45 };
46 
47 static unsigned int ath10k_pci_target_ps;
48 static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
49 static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
50 
51 module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644);
52 MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option");
53 
54 module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
55 MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
56 
57 module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
58 MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
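
/* Usage sketch (not part of the driver): the parameters above can be given
 * at module load time, e.g.
 *
 *	modprobe ath10k_pci irq_mode=2 reset_mode=1
 *
 * to force MSI interrupts and warm-only resets; the 0644 permissions also
 * expose them under /sys/module/ath10k_pci/parameters/. */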
59 
60 /* how long to wait for the target to initialise, in ms */
61 #define ATH10K_PCI_TARGET_WAIT 3000
62 #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
63 
64 #define QCA988X_2_0_DEVICE_ID	(0x003c)
65 
66 static const struct pci_device_id ath10k_pci_id_table[] = {
67 	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
68 	{0}
69 };
70 
71 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
72 				       u32 *data);
73 
74 static int ath10k_pci_post_rx(struct ath10k *ar);
75 static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
76 				   int num);
77 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
78 static int ath10k_pci_cold_reset(struct ath10k *ar);
79 static int ath10k_pci_warm_reset(struct ath10k *ar);
80 static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
81 static int ath10k_pci_init_irq(struct ath10k *ar);
82 static int ath10k_pci_deinit_irq(struct ath10k *ar);
83 static int ath10k_pci_request_irq(struct ath10k *ar);
84 static void ath10k_pci_free_irq(struct ath10k *ar);
85 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
86 			       struct ath10k_ce_pipe *rx_pipe,
87 			       struct bmi_xfer *xfer);
88 
89 static const struct ce_attr host_ce_config_wlan[] = {
90 	/* CE0: host->target HTC control and raw streams */
91 	{
92 		.flags = CE_ATTR_FLAGS,
93 		.src_nentries = 16,
94 		.src_sz_max = 256,
95 		.dest_nentries = 0,
96 	},
97 
98 	/* CE1: target->host HTT + HTC control */
99 	{
100 		.flags = CE_ATTR_FLAGS,
101 		.src_nentries = 0,
102 		.src_sz_max = 512,
103 		.dest_nentries = 512,
104 	},
105 
106 	/* CE2: target->host WMI */
107 	{
108 		.flags = CE_ATTR_FLAGS,
109 		.src_nentries = 0,
110 		.src_sz_max = 2048,
111 		.dest_nentries = 32,
112 	},
113 
114 	/* CE3: host->target WMI */
115 	{
116 		.flags = CE_ATTR_FLAGS,
117 		.src_nentries = 32,
118 		.src_sz_max = 2048,
119 		.dest_nentries = 0,
120 	},
121 
122 	/* CE4: host->target HTT */
123 	{
124 		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
125 		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
126 		.src_sz_max = 256,
127 		.dest_nentries = 0,
128 	},
129 
130 	/* CE5: unused */
131 	{
132 		.flags = CE_ATTR_FLAGS,
133 		.src_nentries = 0,
134 		.src_sz_max = 0,
135 		.dest_nentries = 0,
136 	},
137 
138 	/* CE6: target autonomous hif_memcpy */
139 	{
140 		.flags = CE_ATTR_FLAGS,
141 		.src_nentries = 0,
142 		.src_sz_max = 0,
143 		.dest_nentries = 0,
144 	},
145 
146 	/* CE7: ce_diag, the Diagnostic Window */
147 	{
148 		.flags = CE_ATTR_FLAGS,
149 		.src_nentries = 2,
150 		.src_sz_max = DIAG_TRANSFER_LIMIT,
151 		.dest_nentries = 2,
152 	},
153 };
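
/* Note on the table above: src_nentries sizes the host->target (send) ring
 * and dest_nentries the target->host (receive) ring of each CE; a count of
 * zero means that direction is unused for the pipe. */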
154 
155 /* Target firmware's Copy Engine configuration. */
156 static const struct ce_pipe_config target_ce_config_wlan[] = {
157 	/* CE0: host->target HTC control and raw streams */
158 	{
159 		.pipenum = 0,
160 		.pipedir = PIPEDIR_OUT,
161 		.nentries = 32,
162 		.nbytes_max = 256,
163 		.flags = CE_ATTR_FLAGS,
164 		.reserved = 0,
165 	},
166 
167 	/* CE1: target->host HTT + HTC control */
168 	{
169 		.pipenum = 1,
170 		.pipedir = PIPEDIR_IN,
171 		.nentries = 32,
172 		.nbytes_max = 512,
173 		.flags = CE_ATTR_FLAGS,
174 		.reserved = 0,
175 	},
176 
177 	/* CE2: target->host WMI */
178 	{
179 		.pipenum = 2,
180 		.pipedir = PIPEDIR_IN,
181 		.nentries = 32,
182 		.nbytes_max = 2048,
183 		.flags = CE_ATTR_FLAGS,
184 		.reserved = 0,
185 	},
186 
187 	/* CE3: host->target WMI */
188 	{
189 		.pipenum = 3,
190 		.pipedir = PIPEDIR_OUT,
191 		.nentries = 32,
192 		.nbytes_max = 2048,
193 		.flags = CE_ATTR_FLAGS,
194 		.reserved = 0,
195 	},
196 
197 	/* CE4: host->target HTT */
198 	{
199 		.pipenum = 4,
200 		.pipedir = PIPEDIR_OUT,
201 		.nentries = 256,
202 		.nbytes_max = 256,
203 		.flags = CE_ATTR_FLAGS,
204 		.reserved = 0,
205 	},
206 
207 	/* NB: 50% of src nentries, since tx has 2 frags */
208 
209 	/* CE5: unused */
210 	{
211 		.pipenum = 5,
212 		.pipedir = PIPEDIR_OUT,
213 		.nentries = 32,
214 		.nbytes_max = 2048,
215 		.flags = CE_ATTR_FLAGS,
216 		.reserved = 0,
217 	},
218 
219 	/* CE6: Reserved for target autonomous hif_memcpy */
220 	{
221 		.pipenum = 6,
222 		.pipedir = PIPEDIR_INOUT,
223 		.nentries = 32,
224 		.nbytes_max = 4096,
225 		.flags = CE_ATTR_FLAGS,
226 		.reserved = 0,
227 	},
228 
229 	/* CE7 used only by Host */
230 };
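
/* The table above is copied verbatim into target memory by
 * ath10k_pci_init_config(), so the struct ce_pipe_config layout must match
 * what the firmware expects. */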
231 
232 static bool ath10k_pci_irq_pending(struct ath10k *ar)
233 {
234 	u32 cause;
235 
236 	/* Check if the shared legacy irq is for us */
237 	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
238 				  PCIE_INTR_CAUSE_ADDRESS);
239 	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
240 		return true;
241 
242 	return false;
243 }
244 
245 static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
246 {
247 	/* IMPORTANT: INTR_CLR register has to be set after
248 	 * INTR_ENABLE is set to 0, otherwise interrupt can not be
249 	 * really cleared. */
250 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
251 			   0);
252 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
253 			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
254 
255 	/* IMPORTANT: this extra read transaction is required to
256 	 * flush the posted write buffer. */
257 	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
258 				 PCIE_INTR_ENABLE_ADDRESS);
259 }
260 
261 static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
262 {
263 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
264 			   PCIE_INTR_ENABLE_ADDRESS,
265 			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
266 
267 	/* IMPORTANT: this extra read transaction is required to
268 	 * flush the posted write buffer. */
269 	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
270 				 PCIE_INTR_ENABLE_ADDRESS);
271 }
272 
273 static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
274 {
275 	struct ath10k *ar = arg;
276 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
277 
278 	if (ar_pci->num_msi_intrs == 0) {
279 		if (!ath10k_pci_irq_pending(ar))
280 			return IRQ_NONE;
281 
282 		ath10k_pci_disable_and_clear_legacy_irq(ar);
283 	}
284 
285 	tasklet_schedule(&ar_pci->early_irq_tasklet);
286 
287 	return IRQ_HANDLED;
288 }
289 
290 static int ath10k_pci_request_early_irq(struct ath10k *ar)
291 {
292 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
293 	int ret;
294 
295 	/* Regardless of whether MSI-X/MSI/legacy irqs have been set up, the
296 	 * first interrupt from the irq vector is triggered in all cases for
297 	 * FW indication/errors. */
298 	ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
299 			  IRQF_SHARED, "ath10k_pci (early)", ar);
300 	if (ret) {
301 		ath10k_warn("failed to request early irq: %d\n", ret);
302 		return ret;
303 	}
304 
305 	return 0;
306 }
307 
308 static void ath10k_pci_free_early_irq(struct ath10k *ar)
309 {
310 	free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
311 }
312 
313 /*
314  * Diagnostic read/write access is provided for startup/config/debug usage.
315  * Caller must guarantee proper alignment, when applicable, and single user
316  * at any moment.
317  */
318 static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
319 				    int nbytes)
320 {
321 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
322 	int ret = 0;
323 	u32 buf;
324 	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
325 	unsigned int id;
326 	unsigned int flags;
327 	struct ath10k_ce_pipe *ce_diag;
328 	/* Host buffer address in CE space */
329 	u32 ce_data;
330 	dma_addr_t ce_data_base = 0;
331 	void *data_buf = NULL;
332 	int i;
333 
334 	/*
335 	 * This code cannot handle reads to non-memory space. Redirect to the
336 	 * register read fn but preserve the multi word read capability of
337 	 * this fn
338 	 */
339 	if (address < DRAM_BASE_ADDRESS) {
340 		if (!IS_ALIGNED(address, 4) ||
341 		    !IS_ALIGNED((unsigned long)data, 4))
342 			return -EIO;
343 
344 	while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
345 					   ar, address, (u32 *)data)) == 0)) {
346 			nbytes -= sizeof(u32);
347 			address += sizeof(u32);
348 			data += sizeof(u32);
349 		}
350 		return ret;
351 	}
352 
353 	ce_diag = ar_pci->ce_diag;
354 
355 	/*
356 	 * Allocate a temporary bounce buffer to hold caller's data
357 	 * to be DMA'ed from Target. This guarantees
358 	 *   1) 4-byte alignment
359 	 *   2) Buffer in DMA-able space
360 	 */
361 	orig_nbytes = nbytes;
362 	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
363 						       orig_nbytes,
364 						       &ce_data_base,
365 						       GFP_ATOMIC);
366 
367 	if (!data_buf) {
368 		ret = -ENOMEM;
369 		goto done;
370 	}
371 	memset(data_buf, 0, orig_nbytes);
372 
373 	remaining_bytes = orig_nbytes;
374 	ce_data = ce_data_base;
375 	while (remaining_bytes) {
376 		nbytes = min_t(unsigned int, remaining_bytes,
377 			       DIAG_TRANSFER_LIMIT);
378 
379 		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
380 		if (ret != 0)
381 			goto done;
382 
383 		/* Request CE to send from Target(!) address to Host buffer */
384 		/*
385 		 * The address supplied by the caller is in the
386 		 * Target CPU virtual address space.
387 		 *
388 		 * In order to use this address with the diagnostic CE,
389 		 * convert it from Target CPU virtual address space
390 		 * to CE address space
391 		 */
392 		ath10k_pci_wake(ar);
393 		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
394 						     address);
395 		ath10k_pci_sleep(ar);
396 
397 		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
398 			     0);
399 		if (ret)
400 			goto done;
401 
402 		i = 0;
403 		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
404 						     &completed_nbytes,
405 						     &id) != 0) {
406 			mdelay(1);
407 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
408 				ret = -EBUSY;
409 				goto done;
410 			}
411 		}
412 
413 		if (nbytes != completed_nbytes) {
414 			ret = -EIO;
415 			goto done;
416 		}
417 
418 		if (buf != (u32) address) {
419 			ret = -EIO;
420 			goto done;
421 		}
422 
423 		i = 0;
424 		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
425 						     &completed_nbytes,
426 						     &id, &flags) != 0) {
427 			mdelay(1);
428 
429 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
430 				ret = -EBUSY;
431 				goto done;
432 			}
433 		}
434 
435 		if (nbytes != completed_nbytes) {
436 			ret = -EIO;
437 			goto done;
438 		}
439 
440 		if (buf != ce_data) {
441 			ret = -EIO;
442 			goto done;
443 		}
444 
445 		remaining_bytes -= nbytes;
446 		address += nbytes;
447 		ce_data += nbytes;
448 	}
449 
450 done:
451 	if (ret == 0) {
452 		/* Copy data from allocated DMA buf to caller's buf */
453 		WARN_ON_ONCE(orig_nbytes & 3);
454 		for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
455 			((u32 *)data)[i] =
456 				__le32_to_cpu(((__le32 *)data_buf)[i]);
457 		}
458 	} else
459 		ath10k_warn("failed to read diag value at 0x%x: %d\n",
460 			    address, ret);
461 
462 	if (data_buf)
463 		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
464 				  ce_data_base);
465 
466 	return ret;
467 }
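
/* Typical caller sketch (cf. ath10k_pci_hif_dump_area() below), reading a
 * single 4-byte host interest value:
 *
 *	u32 val;
 *	ret = ath10k_pci_diag_read_mem(ar, host_addr, &val, sizeof(u32));
 *
 * Larger reads are split internally into DIAG_TRANSFER_LIMIT sized chunks. */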
468 
469 /* Read 4-byte aligned data from Target memory or register */
470 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
471 				       u32 *data)
472 {
473 	/* Assume range doesn't cross this boundary */
474 	if (address >= DRAM_BASE_ADDRESS)
475 		return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));
476 
477 	ath10k_pci_wake(ar);
478 	*data = ath10k_pci_read32(ar, address);
479 	ath10k_pci_sleep(ar);
480 	return 0;
481 }
482 
483 static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
484 				     const void *data, int nbytes)
485 {
486 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
487 	int ret = 0;
488 	u32 buf;
489 	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
490 	unsigned int id;
491 	unsigned int flags;
492 	struct ath10k_ce_pipe *ce_diag;
493 	void *data_buf = NULL;
494 	u32 ce_data;	/* Host buffer address in CE space */
495 	dma_addr_t ce_data_base = 0;
496 	int i;
497 
498 	ce_diag = ar_pci->ce_diag;
499 
500 	/*
501 	 * Allocate a temporary bounce buffer to hold caller's data
502 	 * to be DMA'ed to Target. This guarantees
503 	 *   1) 4-byte alignment
504 	 *   2) Buffer in DMA-able space
505 	 */
506 	orig_nbytes = nbytes;
507 	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
508 						       orig_nbytes,
509 						       &ce_data_base,
510 						       GFP_ATOMIC);
511 	if (!data_buf) {
512 		ret = -ENOMEM;
513 		goto done;
514 	}
515 
516 	/* Copy caller's data to allocated DMA buf */
517 	WARN_ON_ONCE(orig_nbytes & 3);
518 	for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
519 		((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
520 
521 	/*
522 	 * The address supplied by the caller is in the
523 	 * Target CPU virtual address space.
524 	 *
525 	 * In order to use this address with the diagnostic CE,
526 	 * convert it from
527 	 *    Target CPU virtual address space
528 	 * to
529 	 *    CE address space
530 	 */
531 	ath10k_pci_wake(ar);
532 	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
533 	ath10k_pci_sleep(ar);
534 
535 	remaining_bytes = orig_nbytes;
536 	ce_data = ce_data_base;
537 	while (remaining_bytes) {
538 		/* FIXME: check cast */
539 		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
540 
541 		/* Set up to receive directly into Target(!) address */
542 		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
543 		if (ret != 0)
544 			goto done;
545 
546 		/*
547 		 * Request CE to send caller-supplied data that
548 		 * was copied to bounce buffer to Target(!) address.
549 		 */
550 		ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
551 				     nbytes, 0, 0);
552 		if (ret != 0)
553 			goto done;
554 
555 		i = 0;
556 		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
557 						     &completed_nbytes,
558 						     &id) != 0) {
559 			mdelay(1);
560 
561 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
562 				ret = -EBUSY;
563 				goto done;
564 			}
565 		}
566 
567 		if (nbytes != completed_nbytes) {
568 			ret = -EIO;
569 			goto done;
570 		}
571 
572 		if (buf != ce_data) {
573 			ret = -EIO;
574 			goto done;
575 		}
576 
577 		i = 0;
578 		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
579 						     &completed_nbytes,
580 						     &id, &flags) != 0) {
581 			mdelay(1);
582 
583 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
584 				ret = -EBUSY;
585 				goto done;
586 			}
587 		}
588 
589 		if (nbytes != completed_nbytes) {
590 			ret = -EIO;
591 			goto done;
592 		}
593 
594 		if (buf != address) {
595 			ret = -EIO;
596 			goto done;
597 		}
598 
599 		remaining_bytes -= nbytes;
600 		address += nbytes;
601 		ce_data += nbytes;
602 	}
603 
604 done:
605 	if (data_buf) {
606 		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
607 				  ce_data_base);
608 	}
609 
610 	if (ret != 0)
611 		ath10k_warn("failed to write diag value at 0x%x: %d\n",
612 			    address, ret);
613 
614 	return ret;
615 }
616 
617 /* Write 4B data to Target memory or register */
618 static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
619 					u32 data)
620 {
621 	/* Assume range doesn't cross this boundary */
622 	if (address >= DRAM_BASE_ADDRESS)
623 		return ath10k_pci_diag_write_mem(ar, address, &data,
624 						 sizeof(u32));
625 
626 	ath10k_pci_wake(ar);
627 	ath10k_pci_write32(ar, address, data);
628 	ath10k_pci_sleep(ar);
629 	return 0;
630 }
631 
632 static bool ath10k_pci_target_is_awake(struct ath10k *ar)
633 {
634 	void __iomem *mem = ath10k_pci_priv(ar)->mem;
635 	u32 val;
636 	val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
637 		       RTC_STATE_ADDRESS);
638 	return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
639 }
640 
641 int ath10k_do_pci_wake(struct ath10k *ar)
642 {
643 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
644 	void __iomem *pci_addr = ar_pci->mem;
645 	int tot_delay = 0;
646 	int curr_delay = 5;
647 
648 	if (atomic_read(&ar_pci->keep_awake_count) == 0) {
649 		/* Force AWAKE */
650 		iowrite32(PCIE_SOC_WAKE_V_MASK,
651 			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
652 			  PCIE_SOC_WAKE_ADDRESS);
653 	}
654 	atomic_inc(&ar_pci->keep_awake_count);
655 
656 	if (ar_pci->verified_awake)
657 		return 0;
658 
659 	for (;;) {
660 		if (ath10k_pci_target_is_awake(ar)) {
661 			ar_pci->verified_awake = true;
662 			return 0;
663 		}
664 
665 		if (tot_delay > PCIE_WAKE_TIMEOUT) {
666 			ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
667 				    PCIE_WAKE_TIMEOUT,
668 				    atomic_read(&ar_pci->keep_awake_count));
669 			return -ETIMEDOUT;
670 		}
671 
672 		udelay(curr_delay);
673 		tot_delay += curr_delay;
674 
675 		if (curr_delay < 50)
676 			curr_delay += 5;
677 	}
678 }
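
/* Minimal usage sketch: callers bracket register accesses with a wake/sleep
 * pair, e.g.
 *
 *	ath10k_pci_wake(ar);
 *	val = ath10k_pci_read32(ar, addr);
 *	ath10k_pci_sleep(ar);
 *
 * keep_awake_count is a refcount, so such pairs may nest safely. */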
679 
680 void ath10k_do_pci_sleep(struct ath10k *ar)
681 {
682 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
683 	void __iomem *pci_addr = ar_pci->mem;
684 
685 	if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
686 		/* Allow sleep */
687 		ar_pci->verified_awake = false;
688 		iowrite32(PCIE_SOC_WAKE_RESET,
689 			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
690 			  PCIE_SOC_WAKE_ADDRESS);
691 	}
692 }
693 
694 /* Called by lower (CE) layer when a send to Target completes. */
695 static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
696 {
697 	struct ath10k *ar = ce_state->ar;
698 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
699 	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
700 	void *transfer_context;
701 	u32 ce_data;
702 	unsigned int nbytes;
703 	unsigned int transfer_id;
704 
705 	while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
706 					     &ce_data, &nbytes,
707 					     &transfer_id) == 0) {
708 		/* no need to call tx completion for NULL pointers */
709 		if (transfer_context == NULL)
710 			continue;
711 
712 		cb->tx_completion(ar, transfer_context, transfer_id);
713 	}
714 }
715 
716 /* Called by lower (CE) layer when data is received from the Target. */
717 static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
718 {
719 	struct ath10k *ar = ce_state->ar;
720 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
721 	struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
722 	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
723 	struct sk_buff *skb;
724 	void *transfer_context;
725 	u32 ce_data;
726 	unsigned int nbytes, max_nbytes;
727 	unsigned int transfer_id;
728 	unsigned int flags;
729 	int err;
730 
731 	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
732 					     &ce_data, &nbytes, &transfer_id,
733 					     &flags) == 0) {
734 		err = ath10k_pci_post_rx_pipe(pipe_info, 1);
735 		if (unlikely(err)) {
736 			/* FIXME: retry */
737 			ath10k_warn("failed to replenish CE rx ring %d: %d\n",
738 				    pipe_info->pipe_num, err);
739 		}
740 
741 		skb = transfer_context;
742 		max_nbytes = skb->len + skb_tailroom(skb);
743 		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
744 				 max_nbytes, DMA_FROM_DEVICE);
745 
746 		if (unlikely(max_nbytes < nbytes)) {
747 			ath10k_warn("rxed more than expected (nbytes %d, max %d)\n",
748 				    nbytes, max_nbytes);
749 			dev_kfree_skb_any(skb);
750 			continue;
751 		}
752 
753 		skb_put(skb, nbytes);
754 		cb->rx_completion(ar, skb, pipe_info->pipe_num);
755 	}
756 }
757 
758 static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
759 				struct ath10k_hif_sg_item *items, int n_items)
760 {
761 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
762 	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
763 	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
764 	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
765 	unsigned int nentries_mask;
766 	unsigned int sw_index;
767 	unsigned int write_index;
768 	int err, i = 0;
769 
770 	spin_lock_bh(&ar_pci->ce_lock);
771 
772 	nentries_mask = src_ring->nentries_mask;
773 	sw_index = src_ring->sw_index;
774 	write_index = src_ring->write_index;
775 
776 	if (unlikely(CE_RING_DELTA(nentries_mask,
777 				   write_index, sw_index - 1) < n_items)) {
778 		err = -ENOBUFS;
779 		goto err;
780 	}
781 
782 	for (i = 0; i < n_items - 1; i++) {
783 		ath10k_dbg(ATH10K_DBG_PCI,
784 			   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
785 			   i, items[i].paddr, items[i].len, n_items);
786 		ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
787 				items[i].vaddr, items[i].len);
788 
789 		err = ath10k_ce_send_nolock(ce_pipe,
790 					    items[i].transfer_context,
791 					    items[i].paddr,
792 					    items[i].len,
793 					    items[i].transfer_id,
794 					    CE_SEND_FLAG_GATHER);
795 		if (err)
796 			goto err;
797 	}
798 
799 	/* `i` is equal to `n_items - 1` after the for() loop */
800 
801 	ath10k_dbg(ATH10K_DBG_PCI,
802 		   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
803 		   i, items[i].paddr, items[i].len, n_items);
804 	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
805 			items[i].vaddr, items[i].len);
806 
807 	err = ath10k_ce_send_nolock(ce_pipe,
808 				    items[i].transfer_context,
809 				    items[i].paddr,
810 				    items[i].len,
811 				    items[i].transfer_id,
812 				    0);
813 	if (err)
814 		goto err;
815 
816 	spin_unlock_bh(&ar_pci->ce_lock);
817 	return 0;
818 
819 err:
820 	for (; i > 0; i--)
821 		__ath10k_ce_send_revert(ce_pipe);
822 
823 	spin_unlock_bh(&ar_pci->ce_lock);
824 	return err;
825 }
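
/* Note: all but the last scatter-gather item are posted with
 * CE_SEND_FLAG_GATHER; the final, flag-less ath10k_ce_send_nolock() call
 * closes the gather list and kicks off the actual transfer. */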
826 
827 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
828 {
829 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
830 
831 	ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n");
832 
833 	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
834 }
835 
836 static void ath10k_pci_hif_dump_area(struct ath10k *ar)
837 {
838 	u32 reg_dump_area = 0;
839 	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
840 	u32 host_addr;
841 	int ret;
842 	u32 i;
843 
844 	ath10k_err("firmware crashed!\n");
845 	ath10k_err("hardware name %s version 0x%x\n",
846 		   ar->hw_params.name, ar->target_version);
847 	ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);
848 
849 	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
850 	ret = ath10k_pci_diag_read_mem(ar, host_addr,
851 				       &reg_dump_area, sizeof(u32));
852 	if (ret) {
853 		ath10k_err("failed to read FW dump area address: %d\n", ret);
854 		return;
855 	}
856 
857 	ath10k_err("target register dump location: 0x%08X\n", reg_dump_area);
858 
859 	ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
860 				       &reg_dump_values[0],
861 				       REG_DUMP_COUNT_QCA988X * sizeof(u32));
862 	if (ret != 0) {
863 		ath10k_err("failed to read FW dump area: %d\n", ret);
864 		return;
865 	}
866 
867 	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
868 
869 	ath10k_err("target register dump:\n");
870 	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
871 		ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
872 			   i,
873 			   reg_dump_values[i],
874 			   reg_dump_values[i + 1],
875 			   reg_dump_values[i + 2],
876 			   reg_dump_values[i + 3]);
877 
878 	queue_work(ar->workqueue, &ar->restart_work);
879 }
880 
881 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
882 					       int force)
883 {
884 	ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n");
885 
886 	if (!force) {
887 		int resources;
888 		/*
889 		 * Decide whether to actually poll for completions, or just
890 		 * wait for a later chance.
891 		 * If there seem to be plenty of resources left, then just wait
892 		 * since checking involves reading a CE register, which is a
893 		 * relatively expensive operation.
894 		 */
895 		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
896 
897 		/*
898 		 * If at least 50% of the total resources are still available,
899 		 * don't bother checking again yet.
900 		 */
901 		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
902 			return;
903 	}
904 	ath10k_ce_per_engine_service(ar, pipe);
905 }
906 
907 static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
908 					 struct ath10k_hif_cb *callbacks)
909 {
910 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
911 
912 	ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");
913 
914 	memcpy(&ar_pci->msg_callbacks_current, callbacks,
915 	       sizeof(ar_pci->msg_callbacks_current));
916 }
917 
918 static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
919 {
920 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
921 	const struct ce_attr *attr;
922 	struct ath10k_pci_pipe *pipe_info;
923 	int pipe_num, disable_interrupts;
924 
925 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
926 		pipe_info = &ar_pci->pipe_info[pipe_num];
927 
928 		/* Handle Diagnostic CE specially */
929 		if (pipe_info->ce_hdl == ar_pci->ce_diag)
930 			continue;
931 
932 		attr = &host_ce_config_wlan[pipe_num];
933 
934 		if (attr->src_nentries) {
935 			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
936 			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
937 						   ath10k_pci_ce_send_done,
938 						   disable_interrupts);
939 		}
940 
941 		if (attr->dest_nentries)
942 			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
943 						   ath10k_pci_ce_recv_data);
944 	}
945 
946 	return 0;
947 }
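
/* CE4 (host->target HTT) is configured with CE_ATTR_DIS_INTR, so its send
 * completions are not interrupt driven; they are reaped by polling through
 * ath10k_pci_hif_send_complete_check() instead. */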
948 
949 static void ath10k_pci_kill_tasklet(struct ath10k *ar)
950 {
951 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
952 	int i;
953 
954 	tasklet_kill(&ar_pci->intr_tq);
955 	tasklet_kill(&ar_pci->msi_fw_err);
956 	tasklet_kill(&ar_pci->early_irq_tasklet);
957 
958 	for (i = 0; i < CE_COUNT; i++)
959 		tasklet_kill(&ar_pci->pipe_info[i].intr);
960 }
961 
962 /* TODO: temporary mapping while we have too few CEs */
963 static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
964 					      u16 service_id, u8 *ul_pipe,
965 					      u8 *dl_pipe, int *ul_is_polled,
966 					      int *dl_is_polled)
967 {
968 	int ret = 0;
969 
970 	ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n");
971 
972 	/* polling for received messages not supported */
973 	*dl_is_polled = 0;
974 
975 	switch (service_id) {
976 	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
977 		/*
978 		 * Host->target HTT gets its own pipe, so it can be polled
979 		 * while other pipes are interrupt driven.
980 		 */
981 		*ul_pipe = 4;
982 		/*
983 		 * Use the same target->host pipe for HTC ctrl, HTC raw
984 		 * streams, and HTT.
985 		 */
986 		*dl_pipe = 1;
987 		break;
988 
989 	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
990 	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
991 		/*
992 		 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
993 		 * HTC_CTRL_RSVD_SVC could share the same pipe as the
994 		 * WMI services.  So, if another CE is needed, change
995 		 * this to *ul_pipe = 3, which frees up CE 0.
996 		 */
997 		/* *ul_pipe = 3; */
998 		*ul_pipe = 0;
999 		*dl_pipe = 1;
1000 		break;
1001 
1002 	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
1003 	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
1004 	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
1005 	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
1006 
1007 	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
1008 		*ul_pipe = 3;
1009 		*dl_pipe = 2;
1010 		break;
1011 
1012 		/* pipe 5 unused   */
1013 		/* pipe 6 reserved */
1014 		/* pipe 7 reserved */
1015 
1016 	default:
1017 		ret = -1;
1018 		break;
1019 	}
1020 	*ul_is_polled =
1021 		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
1022 
1023 	return ret;
1024 }
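
/* For example, ATH10K_HTC_SVC_ID_WMI_CONTROL resolves to *ul_pipe = 3 and
 * *dl_pipe = 2, mirroring the WMI entries in target_service_to_ce_map_wlan
 * below. */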
1025 
1026 static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1027 						u8 *ul_pipe, u8 *dl_pipe)
1028 {
1029 	int ul_is_polled, dl_is_polled;
1030 
1031 	ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n");
1032 
1033 	(void)ath10k_pci_hif_map_service_to_pipe(ar,
1034 						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1035 						 ul_pipe,
1036 						 dl_pipe,
1037 						 &ul_is_polled,
1038 						 &dl_is_polled);
1039 }
1040 
1041 static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
1042 				   int num)
1043 {
1044 	struct ath10k *ar = pipe_info->hif_ce_state;
1045 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1046 	struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
1047 	struct sk_buff *skb;
1048 	dma_addr_t ce_data;
1049 	int i, ret = 0;
1050 
1051 	if (pipe_info->buf_sz == 0)
1052 		return 0;
1053 
1054 	for (i = 0; i < num; i++) {
1055 		skb = dev_alloc_skb(pipe_info->buf_sz);
1056 		if (!skb) {
1057 			ath10k_warn("failed to allocate skbuff for pipe %d\n",
1058 				    pipe_info->pipe_num);
1059 			ret = -ENOMEM;
1060 			goto err;
1061 		}
1062 
1063 		WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
1064 
1065 		ce_data = dma_map_single(ar->dev, skb->data,
1066 					 skb->len + skb_tailroom(skb),
1067 					 DMA_FROM_DEVICE);
1068 
1069 		if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
1070 			ath10k_warn("failed to DMA map sk_buff\n");
1071 			dev_kfree_skb_any(skb);
1072 			ret = -EIO;
1073 			goto err;
1074 		}
1075 
1076 		ATH10K_SKB_CB(skb)->paddr = ce_data;
1077 
1078 		pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
1079 					       pipe_info->buf_sz,
1080 					       PCI_DMA_FROMDEVICE);
1081 
1082 		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
1083 						 ce_data);
1084 		if (ret) {
1085 			ath10k_warn("failed to enqueue to pipe %d: %d\n",
1086 				    pipe_info->pipe_num, ret);
1087 			goto err;
1088 		}
1089 	}
1090 
1091 	return ret;
1092 
1093 err:
1094 	ath10k_pci_rx_pipe_cleanup(pipe_info);
1095 	return ret;
1096 }
1097 
1098 static int ath10k_pci_post_rx(struct ath10k *ar)
1099 {
1100 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1101 	struct ath10k_pci_pipe *pipe_info;
1102 	const struct ce_attr *attr;
1103 	int pipe_num, ret = 0;
1104 
1105 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1106 		pipe_info = &ar_pci->pipe_info[pipe_num];
1107 		attr = &host_ce_config_wlan[pipe_num];
1108 
1109 		if (attr->dest_nentries == 0)
1110 			continue;
1111 
1112 		ret = ath10k_pci_post_rx_pipe(pipe_info,
1113 					      attr->dest_nentries - 1);
1114 		if (ret) {
1115 			ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
1116 				    pipe_num, ret);
1117 
1118 			for (; pipe_num >= 0; pipe_num--) {
1119 				pipe_info = &ar_pci->pipe_info[pipe_num];
1120 				ath10k_pci_rx_pipe_cleanup(pipe_info);
1121 			}
1122 			return ret;
1123 		}
1124 	}
1125 
1126 	return 0;
1127 }
1128 
1129 static int ath10k_pci_hif_start(struct ath10k *ar)
1130 {
1131 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1132 	int ret, ret_early;
1133 
1134 	ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");
1135 
1136 	ath10k_pci_free_early_irq(ar);
1137 	ath10k_pci_kill_tasklet(ar);
1138 
1139 	ret = ath10k_pci_request_irq(ar);
1140 	if (ret) {
1141 		ath10k_warn("failed to request irqs: %d\n",
1142 			    ret);
1143 		goto err_early_irq;
1144 	}
1145 
1146 	ret = ath10k_pci_setup_ce_irq(ar);
1147 	if (ret) {
1148 		ath10k_warn("failed to setup CE interrupts: %d\n", ret);
1149 		goto err_stop;
1150 	}
1151 
1152 	/* Post buffers once to start things off. */
1153 	ret = ath10k_pci_post_rx(ar);
1154 	if (ret) {
1155 		ath10k_warn("failed to post RX buffers for all pipes: %d\n",
1156 			    ret);
1157 		goto err_stop;
1158 	}
1159 
1160 	ar_pci->started = 1;
1161 	return 0;
1162 
1163 err_stop:
1164 	ath10k_ce_disable_interrupts(ar);
1165 	ath10k_pci_free_irq(ar);
1166 	ath10k_pci_kill_tasklet(ar);
1167 err_early_irq:
1168 	/* Though there should be no interrupts (device was reset),
1169 	 * power_down() expects the early IRQ to be installed as per the
1170 	 * driver lifecycle. */
1171 	ret_early = ath10k_pci_request_early_irq(ar);
1172 	if (ret_early)
1173 		ath10k_warn("failed to re-enable early irq: %d\n", ret_early);
1174 
1175 	return ret;
1176 }
1177 
1178 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1179 {
1180 	struct ath10k *ar;
1181 	struct ath10k_pci *ar_pci;
1182 	struct ath10k_ce_pipe *ce_hdl;
1183 	u32 buf_sz;
1184 	struct sk_buff *netbuf;
1185 	u32 ce_data;
1186 
1187 	buf_sz = pipe_info->buf_sz;
1188 
1189 	/* Unused Copy Engine */
1190 	if (buf_sz == 0)
1191 		return;
1192 
1193 	ar = pipe_info->hif_ce_state;
1194 	ar_pci = ath10k_pci_priv(ar);
1195 
1196 	if (!ar_pci->started)
1197 		return;
1198 
1199 	ce_hdl = pipe_info->ce_hdl;
1200 
1201 	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
1202 					  &ce_data) == 0) {
1203 		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
1204 				 netbuf->len + skb_tailroom(netbuf),
1205 				 DMA_FROM_DEVICE);
1206 		dev_kfree_skb_any(netbuf);
1207 	}
1208 }
1209 
1210 static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1211 {
1212 	struct ath10k *ar;
1213 	struct ath10k_pci *ar_pci;
1214 	struct ath10k_ce_pipe *ce_hdl;
1215 	struct sk_buff *netbuf;
1216 	u32 ce_data;
1217 	unsigned int nbytes;
1218 	unsigned int id;
1219 	u32 buf_sz;
1220 
1221 	buf_sz = pipe_info->buf_sz;
1222 
1223 	/* Unused Copy Engine */
1224 	if (buf_sz == 0)
1225 		return;
1226 
1227 	ar = pipe_info->hif_ce_state;
1228 	ar_pci = ath10k_pci_priv(ar);
1229 
1230 	if (!ar_pci->started)
1231 		return;
1232 
1233 	ce_hdl = pipe_info->ce_hdl;
1234 
1235 	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
1236 					  &ce_data, &nbytes, &id) == 0) {
1237 		/* no need to call tx completion for NULL pointers */
1238 		if (!netbuf)
1239 			continue;
1240 
1241 		ar_pci->msg_callbacks_current.tx_completion(ar,
1242 							    netbuf,
1243 							    id);
1244 	}
1245 }
1246 
1247 /*
1248  * Cleanup residual buffers for device shutdown:
1249  *    buffers that were enqueued for receive
1250  *    buffers that were to be sent
1251  * Note: Buffers that had completed but which were
1252  * not yet processed are on a completion queue. They
1253  * are handled when the completion thread shuts down.
1254  */
1255 static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1256 {
1257 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1258 	int pipe_num;
1259 
1260 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1261 		struct ath10k_pci_pipe *pipe_info;
1262 
1263 		pipe_info = &ar_pci->pipe_info[pipe_num];
1264 		ath10k_pci_rx_pipe_cleanup(pipe_info);
1265 		ath10k_pci_tx_pipe_cleanup(pipe_info);
1266 	}
1267 }
1268 
1269 static void ath10k_pci_ce_deinit(struct ath10k *ar)
1270 {
1271 	int i;
1272 
1273 	for (i = 0; i < CE_COUNT; i++)
1274 		ath10k_ce_deinit_pipe(ar, i);
1275 }
1276 
1277 static void ath10k_pci_hif_stop(struct ath10k *ar)
1278 {
1279 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1280 	int ret;
1281 
1282 	ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");
1283 
1284 	if (WARN_ON(!ar_pci->started))
1285 		return;
1286 
1287 	ret = ath10k_ce_disable_interrupts(ar);
1288 	if (ret)
1289 		ath10k_warn("failed to disable CE interrupts: %d\n", ret);
1290 
1291 	ath10k_pci_free_irq(ar);
1292 	ath10k_pci_kill_tasklet(ar);
1293 
1294 	ret = ath10k_pci_request_early_irq(ar);
1295 	if (ret)
1296 		ath10k_warn("failed to re-enable early irq: %d\n", ret);
1297 
1298 	/* At this point the asynchronous threads are stopped and the target
1299 	 * should neither DMA nor raise interrupts. We process the leftovers
1300 	 * and then free everything else up. */
1301 
1302 	ath10k_pci_buffer_cleanup(ar);
1303 
1304 	/* Make sure the device won't access any structures on the host by
1305 	 * resetting it. The device was fed with PCI CE ringbuffer
1306 	 * configuration during init. If ringbuffers are freed and the device
1307 	 * were to access them this could lead to memory corruption on the
1308 	 * host. */
1309 	ath10k_pci_warm_reset(ar);
1310 
1311 	ar_pci->started = 0;
1312 }
1313 
1314 static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1315 					   void *req, u32 req_len,
1316 					   void *resp, u32 *resp_len)
1317 {
1318 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1319 	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1320 	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1321 	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1322 	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
1323 	dma_addr_t req_paddr = 0;
1324 	dma_addr_t resp_paddr = 0;
1325 	struct bmi_xfer xfer = {};
1326 	void *treq, *tresp = NULL;
1327 	int ret = 0;
1328 
1329 	might_sleep();
1330 
1331 	if (resp && !resp_len)
1332 		return -EINVAL;
1333 
1334 	if (resp && resp_len && *resp_len == 0)
1335 		return -EINVAL;
1336 
1337 	treq = kmemdup(req, req_len, GFP_KERNEL);
1338 	if (!treq)
1339 		return -ENOMEM;
1340 
1341 	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1342 	ret = dma_mapping_error(ar->dev, req_paddr);
1343 	if (ret)
1344 		goto err_dma;
1345 
1346 	if (resp && resp_len) {
1347 		tresp = kzalloc(*resp_len, GFP_KERNEL);
1348 		if (!tresp) {
1349 			ret = -ENOMEM;
1350 			goto err_req;
1351 		}
1352 
1353 		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1354 					    DMA_FROM_DEVICE);
1355 		ret = dma_mapping_error(ar->dev, resp_paddr);
1356 		if (ret)
1357 			goto err_req;
1358 
1359 		xfer.wait_for_resp = true;
1360 		xfer.resp_len = 0;
1361 
1362 		ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
1363 	}
1364 
1365 	init_completion(&xfer.done);
1366 
1367 	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1368 	if (ret)
1369 		goto err_resp;
1370 
1371 	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
1372 	if (ret) {
1373 		u32 unused_buffer;
1374 		unsigned int unused_nbytes;
1375 		unsigned int unused_id;
1376 
1377 		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1378 					   &unused_nbytes, &unused_id);
1379 	} else {
1380 		/* the transfer completed before the timeout */
1381 		ret = 0;
1382 	}
1383 
1384 err_resp:
1385 	if (resp) {
1386 		u32 unused_buffer;
1387 
1388 		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1389 		dma_unmap_single(ar->dev, resp_paddr,
1390 				 *resp_len, DMA_FROM_DEVICE);
1391 	}
1392 err_req:
1393 	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1394 
1395 	if (ret == 0 && resp_len) {
1396 		*resp_len = min(*resp_len, xfer.resp_len);
1397 		memcpy(resp, tresp, *resp_len);
1398 	}
1399 err_dma:
1400 	kfree(treq);
1401 	kfree(tresp);
1402 
1403 	return ret;
1404 }
1405 
1406 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
1407 {
1408 	struct bmi_xfer *xfer;
1409 	u32 ce_data;
1410 	unsigned int nbytes;
1411 	unsigned int transfer_id;
1412 
1413 	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1414 					  &nbytes, &transfer_id))
1415 		return;
1416 
1417 	if (xfer->wait_for_resp)
1418 		return;
1419 
1420 	complete(&xfer->done);
1421 }
1422 
1423 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1424 {
1425 	struct bmi_xfer *xfer;
1426 	u32 ce_data;
1427 	unsigned int nbytes;
1428 	unsigned int transfer_id;
1429 	unsigned int flags;
1430 
1431 	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1432 					  &nbytes, &transfer_id, &flags))
1433 		return;
1434 
1435 	if (!xfer->wait_for_resp) {
1436 		ath10k_warn("unexpected: BMI data received; ignoring\n");
1437 		return;
1438 	}
1439 
1440 	xfer->resp_len = nbytes;
1441 	complete(&xfer->done);
1442 }
1443 
1444 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1445 			       struct ath10k_ce_pipe *rx_pipe,
1446 			       struct bmi_xfer *xfer)
1447 {
1448 	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1449 
1450 	while (time_before_eq(jiffies, timeout)) {
1451 		ath10k_pci_bmi_send_done(tx_pipe);
1452 		ath10k_pci_bmi_recv_data(rx_pipe);
1453 
1454 		if (completion_done(&xfer->done))
1455 			return 0;
1456 
1457 		schedule();
1458 	}
1459 
1460 	return -ETIMEDOUT;
1461 }
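
/* BMI exchanges take place before CE interrupts are hooked up in
 * ath10k_pci_hif_start(), so completions are polled: both pipes are reaped
 * repeatedly until xfer->done completes or BMI_COMMUNICATION_TIMEOUT_HZ
 * elapses. */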
1462 
1463 /*
1464  * Map from service/endpoint to Copy Engine.
1465  * This table is derived from the CE_PCI TABLE, above.
1466  * It is passed to the Target at startup for use by firmware.
1467  */
1468 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1469 	{
1470 		 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1471 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1472 		 3,
1473 	},
1474 	{
1475 		 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1476 		 PIPEDIR_IN,		/* in = DL = target -> host */
1477 		 2,
1478 	},
1479 	{
1480 		 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1481 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1482 		 3,
1483 	},
1484 	{
1485 		 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1486 		 PIPEDIR_IN,		/* in = DL = target -> host */
1487 		 2,
1488 	},
1489 	{
1490 		 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1491 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1492 		 3,
1493 	},
1494 	{
1495 		 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1496 		 PIPEDIR_IN,		/* in = DL = target -> host */
1497 		 2,
1498 	},
1499 	{
1500 		 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1501 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1502 		 3,
1503 	},
1504 	{
1505 		 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1506 		 PIPEDIR_IN,		/* in = DL = target -> host */
1507 		 2,
1508 	},
1509 	{
1510 		 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1511 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1512 		 3,
1513 	},
1514 	{
1515 		 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1516 		 PIPEDIR_IN,		/* in = DL = target -> host */
1517 		 2,
1518 	},
1519 	{
1520 		 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1521 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1522 		 0,		/* could be moved to 3 (share with WMI) */
1523 	},
1524 	{
1525 		 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1526 		 PIPEDIR_IN,		/* in = DL = target -> host */
1527 		 1,
1528 	},
1529 	{
1530 		 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
1531 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1532 		 0,
1533 	},
1534 	{
1535 		 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
1536 		 PIPEDIR_IN,		/* in = DL = target -> host */
1537 		 1,
1538 	},
1539 	{
1540 		 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1541 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1542 		 4,
1543 	},
1544 	{
1545 		 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1546 		 PIPEDIR_IN,		/* in = DL = target -> host */
1547 		 1,
1548 	},
1549 
1550 	/* (Additions here) */
1551 
1552 	{				/* Must be last */
1553 		 0,
1554 		 0,
1555 		 0,
1556 	},
1557 };
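
/* Like target_ce_config_wlan, this map is downloaded to the target by
 * ath10k_pci_init_config() (through the pcie_state svc_to_pipe_map
 * pointer) so that host and firmware agree on service<->CE routing. */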
1558 
1559 /*
1560  * Send an interrupt to the device to wake up the Target CPU
1561  * so it has an opportunity to notice any changed state.
1562  */
1563 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1564 {
1565 	int ret;
1566 	u32 core_ctrl;
1567 
1568 	ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1569 					      CORE_CTRL_ADDRESS,
1570 					  &core_ctrl);
1571 	if (ret) {
1572 		ath10k_warn("failed to read core_ctrl: %d\n", ret);
1573 		return ret;
1574 	}
1575 
1576 	/* A_INUM_FIRMWARE interrupt to Target CPU */
1577 	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1578 
1579 	ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1580 					       CORE_CTRL_ADDRESS,
1581 					   core_ctrl);
1582 	if (ret) {
1583 		ath10k_warn("failed to set target CPU interrupt mask: %d\n",
1584 			    ret);
1585 		return ret;
1586 	}
1587 
1588 	return 0;
1589 }
1590 
1591 static int ath10k_pci_init_config(struct ath10k *ar)
1592 {
1593 	u32 interconnect_targ_addr;
1594 	u32 pcie_state_targ_addr = 0;
1595 	u32 pipe_cfg_targ_addr = 0;
1596 	u32 svc_to_pipe_map = 0;
1597 	u32 pcie_config_flags = 0;
1598 	u32 ealloc_value;
1599 	u32 ealloc_targ_addr;
1600 	u32 flag2_value;
1601 	u32 flag2_targ_addr;
1602 	int ret = 0;
1603 
1604 	/* Download to Target the CE Config and the service-to-CE map */
1605 	interconnect_targ_addr =
1606 		host_interest_item_address(HI_ITEM(hi_interconnect_state));
1607 
1608 	/* Supply Target-side CE configuration */
1609 	ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1610 					  &pcie_state_targ_addr);
1611 	if (ret != 0) {
1612 		ath10k_err("Failed to get pcie state addr: %d\n", ret);
1613 		return ret;
1614 	}
1615 
1616 	if (pcie_state_targ_addr == 0) {
1617 		ret = -EIO;
1618 		ath10k_err("Invalid pcie state addr\n");
1619 		return ret;
1620 	}
1621 
1622 	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1623 					  offsetof(struct pcie_state,
1624 						   pipe_cfg_addr),
1625 					  &pipe_cfg_targ_addr);
1626 	if (ret != 0) {
1627 		ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1628 		return ret;
1629 	}
1630 
1631 	if (pipe_cfg_targ_addr == 0) {
1632 		ret = -EIO;
1633 		ath10k_err("Invalid pipe cfg addr\n");
1634 		return ret;
1635 	}
1636 
1637 	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1638 				 target_ce_config_wlan,
1639 				 sizeof(target_ce_config_wlan));
1640 
1641 	if (ret != 0) {
1642 		ath10k_err("Failed to write pipe cfg: %d\n", ret);
1643 		return ret;
1644 	}
1645 
1646 	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1647 					  offsetof(struct pcie_state,
1648 						   svc_to_pipe_map),
1649 					  &svc_to_pipe_map);
1650 	if (ret != 0) {
1651 		ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1652 		return ret;
1653 	}
1654 
1655 	if (svc_to_pipe_map == 0) {
1656 		ret = -EIO;
1657 		ath10k_err("Invalid svc_to_pipe map\n");
1658 		return ret;
1659 	}
1660 
1661 	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1662 				 target_service_to_ce_map_wlan,
1663 				 sizeof(target_service_to_ce_map_wlan));
1664 	if (ret != 0) {
1665 		ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1666 		return ret;
1667 	}
1668 
1669 	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1670 					  offsetof(struct pcie_state,
1671 						   config_flags),
1672 					  &pcie_config_flags);
1673 	if (ret != 0) {
1674 		ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1675 		return ret;
1676 	}
1677 
1678 	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1679 
1680 	ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1681 				 offsetof(struct pcie_state, config_flags),
1682 				 &pcie_config_flags,
1683 				 sizeof(pcie_config_flags));
1684 	if (ret != 0) {
1685 		ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1686 		return ret;
1687 	}
1688 
1689 	/* configure early allocation */
1690 	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1691 
1692 	ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1693 	if (ret != 0) {
1694 		ath10k_err("Failed to get early alloc val: %d\n", ret);
1695 		return ret;
1696 	}
1697 
1698 	/* first bank is switched to IRAM */
1699 	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1700 			 HI_EARLY_ALLOC_MAGIC_MASK);
1701 	ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1702 			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1703 
1704 	ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1705 	if (ret != 0) {
1706 		ath10k_err("Failed to set early alloc val: %d\n", ret);
1707 		return ret;
1708 	}
1709 
1710 	/* Tell Target to proceed with initialization */
1711 	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1712 
1713 	ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1714 	if (ret != 0) {
1715 		ath10k_err("Failed to get option val: %d\n", ret);
1716 		return ret;
1717 	}
1718 
1719 	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1720 
1721 	ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1722 	if (ret != 0) {
1723 		ath10k_err("Failed to set option val: %d\n", ret);
1724 		return ret;
1725 	}
1726 
1727 	return 0;
1728 }
1729 
1730 static int ath10k_pci_alloc_ce(struct ath10k *ar)
1731 {
1732 	int i, ret;
1733 
1734 	for (i = 0; i < CE_COUNT; i++) {
1735 		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
1736 		if (ret) {
1737 			ath10k_err("failed to allocate copy engine pipe %d: %d\n",
1738 				   i, ret);
1739 			return ret;
1740 		}
1741 	}
1742 
1743 	return 0;
1744 }
1745 
1746 static void ath10k_pci_free_ce(struct ath10k *ar)
1747 {
1748 	int i;
1749 
1750 	for (i = 0; i < CE_COUNT; i++)
1751 		ath10k_ce_free_pipe(ar, i);
1752 }
1753 
1754 static int ath10k_pci_ce_init(struct ath10k *ar)
1755 {
1756 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1757 	struct ath10k_pci_pipe *pipe_info;
1758 	const struct ce_attr *attr;
1759 	int pipe_num, ret;
1760 
1761 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1762 		pipe_info = &ar_pci->pipe_info[pipe_num];
1763 		pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
1764 		pipe_info->pipe_num = pipe_num;
1765 		pipe_info->hif_ce_state = ar;
1766 		attr = &host_ce_config_wlan[pipe_num];
1767 
1768 		ret = ath10k_ce_init_pipe(ar, pipe_num, attr);
1769 		if (ret) {
1770 			ath10k_err("failed to initialize copy engine pipe %d: %d\n",
1771 				   pipe_num, ret);
1772 			return ret;
1773 		}
1774 
1775 		if (pipe_num == CE_COUNT - 1) {
1776 			/*
1777 			 * Reserve the last CE for
1778 			 * diagnostic window support
1779 			 */
1780 			ar_pci->ce_diag = pipe_info->ce_hdl;
1781 			continue;
1782 		}
1783 
1784 		pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1785 	}
1786 
1787 	return 0;
1788 }
1789 
1790 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1791 {
1792 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1793 	u32 fw_indicator;
1794 
1795 	ath10k_pci_wake(ar);
1796 
1797 	fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
1798 
1799 	if (fw_indicator & FW_IND_EVENT_PENDING) {
1800 		/* ACK: clear Target-side pending event */
1801 		ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
1802 				   fw_indicator & ~FW_IND_EVENT_PENDING);
1803 
1804 		if (ar_pci->started) {
1805 			ath10k_pci_hif_dump_area(ar);
1806 		} else {
1807 			/*
1808 			 * Probable Target failure before we're prepared
1809 			 * to handle it.  Generally unexpected.
1810 			 */
1811 			ath10k_warn("early firmware event indicated\n");
1812 		}
1813 	}
1814 
1815 	ath10k_pci_sleep(ar);
1816 }
1817 
1818 /* this function effectively clears target memory controller assert line */
1819 static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
1820 {
1821 	u32 val;
1822 
1823 	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1824 	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1825 			       val | SOC_RESET_CONTROL_SI0_RST_MASK);
1826 	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1827 
1828 	msleep(10);
1829 
1830 	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1831 	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1832 			       val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
1833 	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1834 
1835 	msleep(10);
1836 }
1837 
1838 static int ath10k_pci_warm_reset(struct ath10k *ar)
1839 {
1840 	int ret = 0;
1841 	u32 val;
1842 
1843 	ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n");
1844 
1845 	ret = ath10k_do_pci_wake(ar);
1846 	if (ret) {
1847 		ath10k_err("failed to wake up target: %d\n", ret);
1848 		return ret;
1849 	}
1850 
1851 	/* debug */
1852 	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1853 				PCIE_INTR_CAUSE_ADDRESS);
1854 	ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
1855 
1856 	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1857 				CPU_INTR_ADDRESS);
1858 	ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
1859 		   val);
1860 
1861 	/* disable pending irqs */
1862 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1863 			   PCIE_INTR_ENABLE_ADDRESS, 0);
1864 
1865 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1866 			   PCIE_INTR_CLR_ADDRESS, ~0);
1867 
1868 	msleep(100);
1869 
1870 	/* clear fw indicator */
1871 	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
1872 
1873 	/* clear target LF timer interrupts */
1874 	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1875 				SOC_LF_TIMER_CONTROL0_ADDRESS);
1876 	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
1877 			   SOC_LF_TIMER_CONTROL0_ADDRESS,
1878 			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
1879 
1880 	/* reset CE */
1881 	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1882 				SOC_RESET_CONTROL_ADDRESS);
1883 	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1884 			   val | SOC_RESET_CONTROL_CE_RST_MASK);
1885 	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1886 				SOC_RESET_CONTROL_ADDRESS);
1887 	msleep(10);
1888 
1889 	/* unreset CE */
1890 	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1891 			   val & ~SOC_RESET_CONTROL_CE_RST_MASK);
1892 	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1893 				SOC_RESET_CONTROL_ADDRESS);
1894 	msleep(10);
1895 
1896 	ath10k_pci_warm_reset_si0(ar);
1897 
1898 	/* debug */
1899 	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1900 				PCIE_INTR_CAUSE_ADDRESS);
1901 	ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
1902 
1903 	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1904 				CPU_INTR_ADDRESS);
1905 	ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
1906 		   val);
1907 
1908 	/* CPU warm reset */
1909 	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1910 				SOC_RESET_CONTROL_ADDRESS);
1911 	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1912 			   val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
1913 
1914 	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1915 				SOC_RESET_CONTROL_ADDRESS);
1916 	ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);
1917 
1918 	msleep(100);
1919 
1920 	ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");
1921 
1922 	ath10k_do_pci_sleep(ar);
1923 	return ret;
1924 }
1925 
1926 static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
1927 {
1928 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1929 	const char *irq_mode;
1930 	int ret;
1931 
1932 	/*
1933 	 * Bring the target up cleanly.
1934 	 *
1935 	 * The target may be in an undefined state with an AUX-powered Target
1936 	 * and a Host in WoW mode. If the Host crashes, loses power, or is
1937 	 * restarted (without unloading the driver) then the Target is left
1938 	 * (aux) powered and running. On a subsequent driver load, the Target
1939 	 * is in an unexpected state. We try to catch that here in order to
1940 	 * reset the Target and retry the probe.
1941 	 */
1942 	if (cold_reset)
1943 		ret = ath10k_pci_cold_reset(ar);
1944 	else
1945 		ret = ath10k_pci_warm_reset(ar);
1946 
1947 	if (ret) {
1948 		ath10k_err("failed to reset target: %d\n", ret);
1949 		goto err;
1950 	}
1951 
1952 	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1953 		/* Force AWAKE forever */
1954 		ath10k_do_pci_wake(ar);
1955 
1956 	ret = ath10k_pci_ce_init(ar);
1957 	if (ret) {
1958 		ath10k_err("failed to initialize CE: %d\n", ret);
1959 		goto err_ps;
1960 	}
1961 
1962 	ret = ath10k_ce_disable_interrupts(ar);
1963 	if (ret) {
1964 		ath10k_err("failed to disable CE interrupts: %d\n", ret);
1965 		goto err_ce;
1966 	}
1967 
1968 	ret = ath10k_pci_init_irq(ar);
1969 	if (ret) {
1970 		ath10k_err("failed to init irqs: %d\n", ret);
1971 		goto err_ce;
1972 	}
1973 
1974 	ret = ath10k_pci_request_early_irq(ar);
1975 	if (ret) {
1976 		ath10k_err("failed to request early irq: %d\n", ret);
1977 		goto err_deinit_irq;
1978 	}
1979 
1980 	ret = ath10k_pci_wait_for_target_init(ar);
1981 	if (ret) {
1982 		ath10k_err("failed to wait for target to init: %d\n", ret);
1983 		goto err_free_early_irq;
1984 	}
1985 
1986 	ret = ath10k_pci_init_config(ar);
1987 	if (ret) {
1988 		ath10k_err("failed to setup init config: %d\n", ret);
1989 		goto err_free_early_irq;
1990 	}
1991 
1992 	ret = ath10k_pci_wake_target_cpu(ar);
1993 	if (ret) {
1994 		ath10k_err("could not wake up target CPU: %d\n", ret);
1995 		goto err_free_early_irq;
1996 	}
1997 
1998 	if (ar_pci->num_msi_intrs > 1)
1999 		irq_mode = "MSI-X";
2000 	else if (ar_pci->num_msi_intrs == 1)
2001 		irq_mode = "MSI";
2002 	else
2003 		irq_mode = "legacy";
2004 
2005 	if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
2006 		ath10k_info("pci irq %s irq_mode %d reset_mode %d\n",
2007 			    irq_mode, ath10k_pci_irq_mode,
2008 			    ath10k_pci_reset_mode);
2009 
2010 	return 0;
2011 
2012 err_free_early_irq:
2013 	ath10k_pci_free_early_irq(ar);
2014 err_deinit_irq:
2015 	ath10k_pci_deinit_irq(ar);
2016 err_ce:
2017 	ath10k_pci_ce_deinit(ar);
2018 	ath10k_pci_warm_reset(ar);
2019 err_ps:
2020 	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
2021 		ath10k_do_pci_sleep(ar);
2022 err:
2023 	return ret;
2024 }
2025 
2026 static int ath10k_pci_hif_power_up_warm(struct ath10k *ar)
2027 {
2028 	int i, ret;
2029 
2030 	/*
2031 	 * Sometimes a warm reset succeeds only after retries.
2032 	 *
2033 	 * FIXME: It might be possible to tune ath10k_pci_warm_reset() to work
2034 	 * on the first try.
2035 	 */
2036 	for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
2037 		ret = __ath10k_pci_hif_power_up(ar, false);
2038 		if (ret == 0)
2039 			break;
2040 
2041 		ath10k_warn("failed to warm reset (attempt %d out of %d): %d\n",
2042 			    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret);
2043 	}
2044 
2045 	return ret;
2046 }
2047 
2048 static int ath10k_pci_hif_power_up(struct ath10k *ar)
2049 {
2050 	int ret;
2051 
2052 	ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n");
2053 
2054 	/*
2055 	 * Hardware CUS232 version 2 has some issues with cold reset and the
2056 	 * preferred (and safer) way to perform a device reset is through a
2057 	 * warm reset.
2058 	 *
2059 	 * Warm reset doesn't always work though, so falling back to cold reset
2060 	 * may be necessary.
2061 	 */
2062 	ret = ath10k_pci_hif_power_up_warm(ar);
2063 	if (ret) {
2064 		ath10k_warn("failed to power up target using warm reset: %d\n",
2065 			    ret);
2066 
2067 		if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
2068 			return ret;
2069 
2070 		ath10k_warn("trying cold reset\n");
2071 
2072 		ret = __ath10k_pci_hif_power_up(ar, true);
2073 		if (ret) {
2074 			ath10k_err("failed to power up target using cold reset too (%d)\n",
2075 				   ret);
2076 			return ret;
2077 		}
2078 	}
2079 
2080 	return 0;
2081 }
2082 
2083 static void ath10k_pci_hif_power_down(struct ath10k *ar)
2084 {
2085 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2086 
2087 	ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n");
2088 
2089 	ath10k_pci_free_early_irq(ar);
2090 	ath10k_pci_kill_tasklet(ar);
2091 	ath10k_pci_deinit_irq(ar);
2092 	ath10k_pci_ce_deinit(ar);
2093 	ath10k_pci_warm_reset(ar);
2094 
2095 	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
2096 		ath10k_do_pci_sleep(ar);
2097 }
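/*
 * Note the teardown above runs the power-up sequence in reverse: irqs
 * are freed and tasklets stopped before the copy engines are torn down,
 * and a final warm reset parks the target before the host lets it
 * sleep.
 */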
2098 
2099 #ifdef CONFIG_PM
2100 
2101 #define ATH10K_PCI_PM_CONTROL 0x44
2102 
2103 static int ath10k_pci_hif_suspend(struct ath10k *ar)
2104 {
2105 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2106 	struct pci_dev *pdev = ar_pci->pdev;
2107 	u32 val;
2108 
2109 	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2110 
2111 	if ((val & 0x000000ff) != 0x3) {
2112 		pci_save_state(pdev);
2113 		pci_disable_device(pdev);
2114 		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2115 				       (val & 0xffffff00) | 0x03);
2116 	}
2117 
2118 	return 0;
2119 }
2120 
2121 static int ath10k_pci_hif_resume(struct ath10k *ar)
2122 {
2123 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2124 	struct pci_dev *pdev = ar_pci->pdev;
2125 	u32 val;
2126 
2127 	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2128 
2129 	if ((val & 0x000000ff) != 0) {
2130 		pci_restore_state(pdev);
2131 		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2132 				       val & 0xffffff00);
2133 		/*
2134 		 * Suspend/Resume resets the PCI configuration space,
2135 		 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
2136 		 * to keep PCI Tx retries from interfering with C3 CPU state
2137 		 */
2138 		pci_read_config_dword(pdev, 0x40, &val);
2139 
2140 		if ((val & 0x0000ff00) != 0)
2141 			pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2142 	}
2143 
2144 	return 0;
2145 }
2146 #endif
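/*
 * For reference: offset 0x44 appears to be this device's PCI
 * power-management control/status register (PMCSR), so writing 0x3 into
 * the low bits requests D3hot and clearing them requests D0. A minimal
 * sketch of the equivalent using the generic PCI PM helpers (an
 * alternative illustration, not what this driver does;
 * suspend_via_helpers() is a hypothetical name):
 *
 *	static int suspend_via_helpers(struct pci_dev *pdev)
 *	{
 *		pci_save_state(pdev);
 *		pci_disable_device(pdev);
 *		return pci_set_power_state(pdev, PCI_D3hot);
 *	}
 *
 * The PMCSR offset depends on where the PM capability sits in config
 * space; pci_find_capability(pdev, PCI_CAP_ID_PM) + PCI_PM_CTRL locates
 * it generically.
 */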
2147 
2148 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2149 	.tx_sg			= ath10k_pci_hif_tx_sg,
2150 	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
2151 	.start			= ath10k_pci_hif_start,
2152 	.stop			= ath10k_pci_hif_stop,
2153 	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
2154 	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
2155 	.send_complete_check	= ath10k_pci_hif_send_complete_check,
2156 	.set_callbacks		= ath10k_pci_hif_set_callbacks,
2157 	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
2158 	.power_up		= ath10k_pci_hif_power_up,
2159 	.power_down		= ath10k_pci_hif_power_down,
2160 #ifdef CONFIG_PM
2161 	.suspend		= ath10k_pci_hif_suspend,
2162 	.resume			= ath10k_pci_hif_resume,
2163 #endif
2164 };
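/*
 * The ath10k core never calls the PCI layer directly; it dispatches
 * through the table above via thin inline wrappers. A minimal sketch of
 * that dispatch, assuming the wrapper style used by hif.h (illustrative
 * reconstruction, not a verbatim copy):
 *
 *	static inline int ath10k_hif_power_up(struct ath10k *ar)
 *	{
 *		return ar->hif.ops->power_up(ar);
 *	}
 */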
2165 
2166 static void ath10k_pci_ce_tasklet(unsigned long ptr)
2167 {
2168 	struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
2169 	struct ath10k_pci *ar_pci = pipe->ar_pci;
2170 
2171 	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2172 }
2173 
2174 static void ath10k_msi_err_tasklet(unsigned long data)
2175 {
2176 	struct ath10k *ar = (struct ath10k *)data;
2177 
2178 	ath10k_pci_fw_interrupt_handler(ar);
2179 }
2180 
2181 /*
2182  * Handler for a per-engine interrupt on a PARTICULAR CE.
2183  * This is used in cases where each CE has a private MSI interrupt.
2184  */
2185 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2186 {
2187 	struct ath10k *ar = arg;
2188 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2189 	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2190 
2191 	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
2192 		ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
2193 		return IRQ_HANDLED;
2194 	}
2195 
2196 	/*
2197 	 * NOTE: We are able to derive ce_id from irq because we
2198 	 * use a one-to-one mapping for CEs 0..5.
2199 	 * CEs 6 & 7 do not use interrupts at all.
2200 	 *
2201 	 * This mapping must be kept in sync with the mapping
2202 	 * used by firmware.
2203 	 */
2204 	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2205 	return IRQ_HANDLED;
2206 }
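/*
 * Worked example of the irq -> ce_id mapping above: assuming
 * pdev->irq == 100 and MSI_ASSIGN_CE_INITIAL == 1, CE 3's vector is
 * requested as irq 100 + 1 + 3 == 104 (see ath10k_pci_request_irq_msix()
 * below), and this handler recovers ce_id = 104 - 100 - 1 == 3.
 */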
2207 
2208 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2209 {
2210 	struct ath10k *ar = arg;
2211 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2212 
2213 	tasklet_schedule(&ar_pci->msi_fw_err);
2214 	return IRQ_HANDLED;
2215 }
2216 
2217 /*
2218  * Top-level interrupt handler for all PCI interrupts from a Target.
2219  * When a block of MSI interrupts is allocated, this top-level handler
2220  * is not used; instead, we directly call the correct sub-handler.
2221  */
2222 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2223 {
2224 	struct ath10k *ar = arg;
2225 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2226 
2227 	if (ar_pci->num_msi_intrs == 0) {
2228 		if (!ath10k_pci_irq_pending(ar))
2229 			return IRQ_NONE;
2230 
2231 		ath10k_pci_disable_and_clear_legacy_irq(ar);
2232 	}
2233 
2234 	tasklet_schedule(&ar_pci->intr_tq);
2235 
2236 	return IRQ_HANDLED;
2237 }
2238 
2239 static void ath10k_pci_early_irq_tasklet(unsigned long data)
2240 {
2241 	struct ath10k *ar = (struct ath10k *)data;
2242 	u32 fw_ind;
2243 	int ret;
2244 
2245 	ret = ath10k_pci_wake(ar);
2246 	if (ret) {
2247 		ath10k_warn("failed to wake target in early irq tasklet: %d\n",
2248 			    ret);
2249 		return;
2250 	}
2251 
2252 	fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2253 	if (fw_ind & FW_IND_EVENT_PENDING) {
2254 		ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
2255 				   fw_ind & ~FW_IND_EVENT_PENDING);
2256 		ath10k_pci_hif_dump_area(ar);
2257 	}
2258 
2259 	ath10k_pci_sleep(ar);
2260 	ath10k_pci_enable_legacy_irq(ar);
2261 }
2262 
2263 static void ath10k_pci_tasklet(unsigned long data)
2264 {
2265 	struct ath10k *ar = (struct ath10k *)data;
2266 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2267 
2268 	ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2269 	ath10k_ce_per_engine_service_any(ar);
2270 
2271 	/* Re-enable legacy irq that was disabled in the irq handler */
2272 	if (ar_pci->num_msi_intrs == 0)
2273 		ath10k_pci_enable_legacy_irq(ar);
2274 }
2275 
2276 static int ath10k_pci_request_irq_msix(struct ath10k *ar)
2277 {
2278 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2279 	int ret, i;
2280 
2281 	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2282 			  ath10k_pci_msi_fw_handler,
2283 			  IRQF_SHARED, "ath10k_pci", ar);
2284 	if (ret) {
2285 		ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
2286 			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2287 		return ret;
2288 	}
2289 
2290 	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2291 		ret = request_irq(ar_pci->pdev->irq + i,
2292 				  ath10k_pci_per_engine_handler,
2293 				  IRQF_SHARED, "ath10k_pci", ar);
2294 		if (ret) {
2295 			ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
2296 				    ar_pci->pdev->irq + i, ret);
2297 
2298 			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2299 				free_irq(ar_pci->pdev->irq + i, ar);
2300 
2301 			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2302 			return ret;
2303 		}
2304 	}
2305 
2306 	return 0;
2307 }
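/*
 * On a partial failure above, the "for (i--; ...)" loop walks back over
 * the CE vectors that were already requested and frees them, then frees
 * the FW vector, so no irq is leaked when a single request_irq() fails.
 */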
2308 
2309 static int ath10k_pci_request_irq_msi(struct ath10k *ar)
2310 {
2311 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2312 	int ret;
2313 
2314 	ret = request_irq(ar_pci->pdev->irq,
2315 			  ath10k_pci_interrupt_handler,
2316 			  IRQF_SHARED, "ath10k_pci", ar);
2317 	if (ret) {
2318 		ath10k_warn("failed to request MSI irq %d: %d\n",
2319 			    ar_pci->pdev->irq, ret);
2320 		return ret;
2321 	}
2322 
2323 	return 0;
2324 }
2325 
2326 static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
2327 {
2328 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2329 	int ret;
2330 
2331 	ret = request_irq(ar_pci->pdev->irq,
2332 			  ath10k_pci_interrupt_handler,
2333 			  IRQF_SHARED, "ath10k_pci", ar);
2334 	if (ret) {
2335 		ath10k_warn("failed to request legacy irq %d: %d\n",
2336 			    ar_pci->pdev->irq, ret);
2337 		return ret;
2338 	}
2339 
2340 	return 0;
2341 }
2342 
2343 static int ath10k_pci_request_irq(struct ath10k *ar)
2344 {
2345 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2346 
2347 	switch (ar_pci->num_msi_intrs) {
2348 	case 0:
2349 		return ath10k_pci_request_irq_legacy(ar);
2350 	case 1:
2351 		return ath10k_pci_request_irq_msi(ar);
2352 	case MSI_NUM_REQUEST:
2353 		return ath10k_pci_request_irq_msix(ar);
2354 	}
2355 
2356 	ath10k_warn("unknown irq configuration upon request\n");
2357 	return -EINVAL;
2358 }
2359 
2360 static void ath10k_pci_free_irq(struct ath10k *ar)
2361 {
2362 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2363 	int i;
2364 
2365 	/* There's at least one interrupt regardless of whether it's legacy INTR,
2366 	 * MSI or MSI-X */
2367 	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2368 		free_irq(ar_pci->pdev->irq + i, ar);
2369 }
2370 
2371 static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2372 {
2373 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2374 	int i;
2375 
2376 	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2377 	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2378 		     (unsigned long)ar);
2379 	tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
2380 		     (unsigned long)ar);
2381 
2382 	for (i = 0; i < CE_COUNT; i++) {
2383 		ar_pci->pipe_info[i].ar_pci = ar_pci;
2384 		tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2385 			     (unsigned long)&ar_pci->pipe_info[i]);
2386 	}
2387 }
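/*
 * Every tasklet_init() above needs a matching teardown before the
 * backing memory goes away; ath10k_pci_kill_tasklet() (used in
 * ath10k_pci_hif_power_down() above) is that counterpart. A minimal
 * sketch of the usual pairing for one tasklet, assuming process
 * context:
 *
 *	tasklet_kill(&ar_pci->intr_tq);
 *
 * tasklet_kill() may busy-wait for a scheduled tasklet to finish, so it
 * must not be called from interrupt context.
 */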
2388 
2389 static int ath10k_pci_init_irq(struct ath10k *ar)
2390 {
2391 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2392 	bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
2393 				       ar_pci->features);
2394 	int ret;
2395 
2396 	ath10k_pci_init_irq_tasklets(ar);
2397 
2398 	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
2399 	    !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
2400 		ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);
2401 
2402 	/* Try MSI-X */
2403 	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
2404 		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
2405 		ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
2406 					   ar_pci->num_msi_intrs);
2407 		if (ret > 0)
2408 			return 0;
2409 
2410 		/* fall-through */
2411 	}
2412 
2413 	/* Try MSI */
2414 	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2415 		ar_pci->num_msi_intrs = 1;
2416 		ret = pci_enable_msi(ar_pci->pdev);
2417 		if (ret == 0)
2418 			return 0;
2419 
2420 		/* fall-through */
2421 	}
2422 
2423 	/* Try legacy irq
2424 	 *
2425 	 * A potential race occurs here: the CORE_BASE write
2426 	 * depends on the target correctly decoding the AXI address, but
2427 	 * the host won't know when the target writes BAR to CORE_CTRL.
2428 	 * This write might get lost if the target has NOT written BAR.
2429 	 * For now, fix the race by repeating the write in the
2430 	 * synchronization check below. */
2431 	ar_pci->num_msi_intrs = 0;
2432 
2433 	ret = ath10k_pci_wake(ar);
2434 	if (ret) {
2435 		ath10k_warn("failed to wake target: %d\n", ret);
2436 		return ret;
2437 	}
2438 
2439 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2440 			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2441 	ath10k_pci_sleep(ar);
2442 
2443 	return 0;
2444 }
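/*
 * The "repeat the write" fix referenced in the legacy-irq comment above
 * is carried out by ath10k_pci_wait_for_target_init() below, which
 * re-posts the PCIE_INTR_ENABLE_ADDRESS write on every poll iteration
 * while running in legacy interrupt mode.
 */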
2445 
2446 static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2447 {
2448 	int ret;
2449 
2450 	ret = ath10k_pci_wake(ar);
2451 	if (ret) {
2452 		ath10k_warn("failed to wake target: %d\n", ret);
2453 		return ret;
2454 	}
2455 
2456 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2457 			   0);
2458 	ath10k_pci_sleep(ar);
2459 
2460 	return 0;
2461 }
2462 
2463 static int ath10k_pci_deinit_irq(struct ath10k *ar)
2464 {
2465 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2466 
2467 	switch (ar_pci->num_msi_intrs) {
2468 	case 0:
2469 		return ath10k_pci_deinit_irq_legacy(ar);
2470 	case 1:
2471 		/* fall-through */
2472 	case MSI_NUM_REQUEST:
2473 		pci_disable_msi(ar_pci->pdev);
2474 		return 0;
2475 	default:
2476 		pci_disable_msi(ar_pci->pdev);
2477 	}
2478 
2479 	ath10k_warn("unknown irq configuration upon deinit\n");
2480 	return -EINVAL;
2481 }
2482 
2483 static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2484 {
2485 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2486 	unsigned long timeout;
2487 	int ret;
2488 	u32 val;
2489 
2490 	ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting for target to initialise\n");
2491 
2492 	ret = ath10k_pci_wake(ar);
2493 	if (ret) {
2494 		ath10k_err("failed to wake up target for init: %d\n", ret);
2495 		return ret;
2496 	}
2497 
2498 	timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2499 
2500 	do {
2501 		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2502 
2503 		ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);
2504 
2505 		/* target should never return this */
2506 		if (val == 0xffffffff)
2507 			continue;
2508 
2509 		/* the device has crashed so don't bother trying anymore */
2510 		if (val & FW_IND_EVENT_PENDING)
2511 			break;
2512 
2513 		if (val & FW_IND_INITIALIZED)
2514 			break;
2515 
2516 		if (ar_pci->num_msi_intrs == 0)
2517 			/* Fix potential race by repeating CORE_BASE writes */
2518 			ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
2519 					       PCIE_INTR_FIRMWARE_MASK |
2520 					       PCIE_INTR_CE_MASK_ALL);
2521 
2522 		mdelay(10);
2523 	} while (time_before(jiffies, timeout));
2524 
2525 	if (val == 0xffffffff) {
2526 		ath10k_err("failed to read device register, device is gone\n");
2527 		ret = -EIO;
2528 		goto out;
2529 	}
2530 
2531 	if (val & FW_IND_EVENT_PENDING) {
2532 		ath10k_warn("device has crashed during init\n");
2533 		ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
2534 				   val & ~FW_IND_EVENT_PENDING);
2535 		ath10k_pci_hif_dump_area(ar);
2536 		ret = -ECOMM;
2537 		goto out;
2538 	}
2539 
2540 	if (!(val & FW_IND_INITIALIZED)) {
2541 		ath10k_err("failed to receive initialized event from target: %08x\n",
2542 			   val);
2543 		ret = -ETIMEDOUT;
2544 		goto out;
2545 	}
2546 
2547 	ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");
2548 
2549 out:
2550 	ath10k_pci_sleep(ar);
2551 	return ret;
2552 }
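/*
 * The loop above is the standard jiffies deadline poll: compute the
 * deadline once with msecs_to_jiffies(), re-read the status register
 * and let time_before() cope with jiffies wraparound. Worked example:
 * with HZ == 100, ATH10K_PCI_TARGET_WAIT (3000 ms) converts to 300
 * jiffies, so the register is re-checked roughly every 10 ms for up to
 * 3 seconds.
 */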
2553 
2554 static int ath10k_pci_cold_reset(struct ath10k *ar)
2555 {
2556 	int i, ret;
2557 	u32 val;
2558 
2559 	ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");
2560 
2561 	ret = ath10k_do_pci_wake(ar);
2562 	if (ret) {
2563 		ath10k_err("failed to wake up target: %d\n",
2564 			   ret);
2565 		return ret;
2566 	}
2567 
2568 	/* Put Target, including PCIe, into RESET. */
2569 	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2570 	val |= 1;
2571 	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2572 
2573 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2574 		if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2575 					  RTC_STATE_COLD_RESET_MASK)
2576 			break;
2577 		msleep(1);
2578 	}
2579 
2580 	/* Pull Target, including PCIe, out of RESET. */
2581 	val &= ~1;
2582 	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2583 
2584 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2585 		if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2586 					    RTC_STATE_COLD_RESET_MASK))
2587 			break;
2588 		msleep(1);
2589 	}
2590 
2591 	ath10k_do_pci_sleep(ar);
2592 
2593 	ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");
2594 
2595 	return 0;
2596 }
2597 
2598 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2599 {
2600 	int i;
2601 
2602 	for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2603 		if (!test_bit(i, ar_pci->features))
2604 			continue;
2605 
2606 		switch (i) {
2607 		case ATH10K_PCI_FEATURE_MSI_X:
2608 			ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
2609 			break;
2610 		case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
2611 			ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
2612 			break;
2613 		}
2614 	}
2615 }
2616 
2617 static int ath10k_pci_probe(struct pci_dev *pdev,
2618 			    const struct pci_device_id *pci_dev)
2619 {
2620 	void __iomem *mem;
2621 	int ret = 0;
2622 	struct ath10k *ar;
2623 	struct ath10k_pci *ar_pci;
2624 	u32 lcr_val, chip_id;
2625 
2626 	ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");
2627 
2628 	ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2629 	if (ar_pci == NULL)
2630 		return -ENOMEM;
2631 
2632 	ar_pci->pdev = pdev;
2633 	ar_pci->dev = &pdev->dev;
2634 
2635 	switch (pci_dev->device) {
2636 	case QCA988X_2_0_DEVICE_ID:
2637 		set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2638 		break;
2639 	default:
2640 		ret = -ENODEV;
2641 		ath10k_err("unknown device ID: %d\n", pci_dev->device);
2642 		goto err_ar_pci;
2643 	}
2644 
2645 	if (ath10k_pci_target_ps)
2646 		set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2647 
2648 	ath10k_pci_dump_features(ar_pci);
2649 
2650 	ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
2651 	if (!ar) {
2652 		ath10k_err("failed to create driver core\n");
2653 		ret = -EINVAL;
2654 		goto err_ar_pci;
2655 	}
2656 
2657 	ar_pci->ar = ar;
2658 	atomic_set(&ar_pci->keep_awake_count, 0);
2659 
2660 	pci_set_drvdata(pdev, ar);
2661 
2662 	ret = pci_enable_device(pdev);
2663 	if (ret) {
2664 		ath10k_err("failed to enable PCI device: %d\n", ret);
2665 		goto err_ar;
2666 	}
2667 
2668 	/* Request MMIO resources */
2669 	ret = pci_request_region(pdev, BAR_NUM, "ath");
2670 	if (ret) {
2671 		ath10k_err("failed to request MMIO region: %d\n", ret);
2672 		goto err_device;
2673 	}
2674 
2675 	/*
2676 	 * Target structures have a limit of 32 bit DMA pointers.
2677 	 * DMA pointers can be wider than 32 bits by default on some systems.
2678 	 */
2679 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2680 	if (ret) {
2681 		ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
2682 		goto err_region;
2683 	}
2684 
2685 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2686 	if (ret) {
2687 		ath10k_err("failed to set consistent DMA mask to 32-bit: %d\n", ret);
2688 		goto err_region;
2689 	}
2690 
2691 	/* Set bus master bit in PCI_COMMAND to enable DMA */
2692 	pci_set_master(pdev);
2693 
2694 	/*
2695 	 * Temporary FIX: disable ASPM
2696 	 * Will be removed after the OTP is programmed
2697 	 */
2698 	pci_read_config_dword(pdev, 0x80, &lcr_val);
2699 	pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2700 
2701 	/* Arrange for access to Target SoC registers. */
2702 	mem = pci_iomap(pdev, BAR_NUM, 0);
2703 	if (!mem) {
2704 		ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
2705 		ret = -EIO;
2706 		goto err_master;
2707 	}
2708 
2709 	ar_pci->mem = mem;
2710 
2711 	spin_lock_init(&ar_pci->ce_lock);
2712 
2713 	ret = ath10k_do_pci_wake(ar);
2714 	if (ret) {
2715 		ath10k_err("failed to wake up target to read chip id: %d\n", ret);
2716 		goto err_iomap;
2717 	}
2718 
2719 	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
2720 
2721 	ath10k_do_pci_sleep(ar);
2722 
2723 	ret = ath10k_pci_alloc_ce(ar);
2724 	if (ret) {
2725 		ath10k_err("failed to allocate copy engine pipes: %d\n", ret);
2726 		goto err_iomap;
2727 	}
2728 
2729 	ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2730 
2731 	ret = ath10k_core_register(ar, chip_id);
2732 	if (ret) {
2733 		ath10k_err("failed to register driver core: %d\n", ret);
2734 		goto err_free_ce;
2735 	}
2736 
2737 	return 0;
2738 
2739 err_free_ce:
2740 	ath10k_pci_free_ce(ar);
2741 err_iomap:
2742 	pci_iounmap(pdev, mem);
2743 err_master:
2744 	pci_clear_master(pdev);
2745 err_region:
2746 	pci_release_region(pdev, BAR_NUM);
2747 err_device:
2748 	pci_disable_device(pdev);
2749 err_ar:
2750 	ath10k_core_destroy(ar);
2751 err_ar_pci:
2752 	/* call HIF PCI free here */
2753 	kfree(ar_pci);
2754 
2755 	return ret;
2756 }
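/*
 * Aside on the DMA mask setup in ath10k_pci_probe(): on kernels that
 * provide it, the pci_set_dma_mask()/pci_set_consistent_dma_mask() pair
 * can be collapsed into a single call (a sketch of the alternative, not
 * a change this driver makes):
 *
 *	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret) {
 *		ath10k_err("failed to set 32-bit DMA masks: %d\n", ret);
 *		goto err_region;
 *	}
 */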
2757 
2758 static void ath10k_pci_remove(struct pci_dev *pdev)
2759 {
2760 	struct ath10k *ar = pci_get_drvdata(pdev);
2761 	struct ath10k_pci *ar_pci;
2762 
2763 	ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");
2764 
2765 	if (!ar)
2766 		return;
2767 
2768 	ar_pci = ath10k_pci_priv(ar);
2769 
2770 	if (!ar_pci)
2771 		return;
2772 
2773 	ath10k_core_unregister(ar);
2774 	ath10k_pci_free_ce(ar);
2775 
2776 	pci_iounmap(pdev, ar_pci->mem);
2777 	pci_release_region(pdev, BAR_NUM);
2778 	pci_clear_master(pdev);
2779 	pci_disable_device(pdev);
2780 
2781 	ath10k_core_destroy(ar);
2782 	kfree(ar_pci);
2783 }
2784 
2785 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2786 
2787 static struct pci_driver ath10k_pci_driver = {
2788 	.name = "ath10k_pci",
2789 	.id_table = ath10k_pci_id_table,
2790 	.probe = ath10k_pci_probe,
2791 	.remove = ath10k_pci_remove,
2792 };
2793 
2794 static int __init ath10k_pci_init(void)
2795 {
2796 	int ret;
2797 
2798 	ret = pci_register_driver(&ath10k_pci_driver);
2799 	if (ret)
2800 		ath10k_err("failed to register PCI driver: %d\n", ret);
2801 
2802 	return ret;
2803 }
2804 module_init(ath10k_pci_init);
2805 
2806 static void __exit ath10k_pci_exit(void)
2807 {
2808 	pci_unregister_driver(&ath10k_pci_driver);
2809 }
2810 
2811 module_exit(ath10k_pci_exit);
2812 
2813 MODULE_AUTHOR("Qualcomm Atheros");
2814 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2815 MODULE_LICENSE("Dual BSD/GPL");
2816 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);
2817 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
2818