xref: /openbmc/linux/drivers/net/wireless/ath/ath10k/pci.c (revision b34e08d5)
1 /*
2  * Copyright (c) 2005-2011 Atheros Communications Inc.
3  * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <linux/pci.h>
19 #include <linux/module.h>
20 #include <linux/interrupt.h>
21 #include <linux/spinlock.h>
22 #include <linux/bitops.h>
23 
24 #include "core.h"
25 #include "debug.h"
26 
27 #include "targaddrs.h"
28 #include "bmi.h"
29 
30 #include "hif.h"
31 #include "htc.h"
32 
33 #include "ce.h"
34 #include "pci.h"
35 
36 enum ath10k_pci_irq_mode {
37 	ATH10K_PCI_IRQ_AUTO = 0,
38 	ATH10K_PCI_IRQ_LEGACY = 1,
39 	ATH10K_PCI_IRQ_MSI = 2,
40 };
41 
42 static unsigned int ath10k_target_ps;
43 static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
44 
45 module_param(ath10k_target_ps, uint, 0644);
46 MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
47 
48 module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
49 MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
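/* e.g. loading the module with "irq_mode=1" forces legacy interrupts, which
 * can help on platforms where MSI delivery is broken */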
50 
51 #define QCA988X_2_0_DEVICE_ID	(0x003c)
52 
53 static const struct pci_device_id ath10k_pci_id_table[] = {
54 	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
55 	{0}
56 };
57 
58 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
59 				       u32 *data);
60 
61 static int ath10k_pci_post_rx(struct ath10k *ar);
62 static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
63 					     int num);
64 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
65 static int ath10k_pci_cold_reset(struct ath10k *ar);
66 static int ath10k_pci_warm_reset(struct ath10k *ar);
67 static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
68 static int ath10k_pci_init_irq(struct ath10k *ar);
69 static int ath10k_pci_deinit_irq(struct ath10k *ar);
70 static int ath10k_pci_request_irq(struct ath10k *ar);
71 static void ath10k_pci_free_irq(struct ath10k *ar);
72 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
73 			       struct ath10k_ce_pipe *rx_pipe,
74 			       struct bmi_xfer *xfer);
75 
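/* Host-side Copy Engine configuration. src_nentries sizes the host->target
 * send ring and dest_nentries the target->host receive ring (0 disables that
 * direction); src_sz_max caps the per-transfer length in bytes. */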
76 static const struct ce_attr host_ce_config_wlan[] = {
77 	/* CE0: host->target HTC control and raw streams */
78 	{
79 		.flags = CE_ATTR_FLAGS,
80 		.src_nentries = 16,
81 		.src_sz_max = 256,
82 		.dest_nentries = 0,
83 	},
84 
85 	/* CE1: target->host HTT + HTC control */
86 	{
87 		.flags = CE_ATTR_FLAGS,
88 		.src_nentries = 0,
89 		.src_sz_max = 512,
90 		.dest_nentries = 512,
91 	},
92 
93 	/* CE2: target->host WMI */
94 	{
95 		.flags = CE_ATTR_FLAGS,
96 		.src_nentries = 0,
97 		.src_sz_max = 2048,
98 		.dest_nentries = 32,
99 	},
100 
101 	/* CE3: host->target WMI */
102 	{
103 		.flags = CE_ATTR_FLAGS,
104 		.src_nentries = 32,
105 		.src_sz_max = 2048,
106 		.dest_nentries = 0,
107 	},
108 
109 	/* CE4: host->target HTT */
110 	{
111 		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
112 		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
113 		.src_sz_max = 256,
114 		.dest_nentries = 0,
115 	},
116 
117 	/* CE5: unused */
118 	{
119 		.flags = CE_ATTR_FLAGS,
120 		.src_nentries = 0,
121 		.src_sz_max = 0,
122 		.dest_nentries = 0,
123 	},
124 
125 	/* CE6: target autonomous hif_memcpy */
126 	{
127 		.flags = CE_ATTR_FLAGS,
128 		.src_nentries = 0,
129 		.src_sz_max = 0,
130 		.dest_nentries = 0,
131 	},
132 
133 	/* CE7: ce_diag, the Diagnostic Window */
134 	{
135 		.flags = CE_ATTR_FLAGS,
136 		.src_nentries = 2,
137 		.src_sz_max = DIAG_TRANSFER_LIMIT,
138 		.dest_nentries = 2,
139 	},
140 };
141 
142 /* Target firmware's Copy Engine configuration. */
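/* Written verbatim into target memory by ath10k_pci_init_config(); pipedir
 * is given from the host's perspective (PIPEDIR_OUT = host->target). */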
143 static const struct ce_pipe_config target_ce_config_wlan[] = {
144 	/* CE0: host->target HTC control and raw streams */
145 	{
146 		.pipenum = 0,
147 		.pipedir = PIPEDIR_OUT,
148 		.nentries = 32,
149 		.nbytes_max = 256,
150 		.flags = CE_ATTR_FLAGS,
151 		.reserved = 0,
152 	},
153 
154 	/* CE1: target->host HTT + HTC control */
155 	{
156 		.pipenum = 1,
157 		.pipedir = PIPEDIR_IN,
158 		.nentries = 32,
159 		.nbytes_max = 512,
160 		.flags = CE_ATTR_FLAGS,
161 		.reserved = 0,
162 	},
163 
164 	/* CE2: target->host WMI */
165 	{
166 		.pipenum = 2,
167 		.pipedir = PIPEDIR_IN,
168 		.nentries = 32,
169 		.nbytes_max = 2048,
170 		.flags = CE_ATTR_FLAGS,
171 		.reserved = 0,
172 	},
173 
174 	/* CE3: host->target WMI */
175 	{
176 		.pipenum = 3,
177 		.pipedir = PIPEDIR_OUT,
178 		.nentries = 32,
179 		.nbytes_max = 2048,
180 		.flags = CE_ATTR_FLAGS,
181 		.reserved = 0,
182 	},
183 
184 	/* CE4: host->target HTT */
185 	{
186 		.pipenum = 4,
187 		.pipedir = PIPEDIR_OUT,
188 		.nentries = 256,
189 		.nbytes_max = 256,
190 		.flags = CE_ATTR_FLAGS,
191 		.reserved = 0,
192 	},
193 
194 	/* NB: 50% of src nentries, since tx has 2 frags */
195 
196 	/* CE5: unused */
197 	{
198 		.pipenum = 5,
199 		.pipedir = PIPEDIR_OUT,
200 		.nentries = 32,
201 		.nbytes_max = 2048,
202 		.flags = CE_ATTR_FLAGS,
203 		.reserved = 0,
204 	},
205 
206 	/* CE6: Reserved for target autonomous hif_memcpy */
207 	{
208 		.pipenum = 6,
209 		.pipedir = PIPEDIR_INOUT,
210 		.nentries = 32,
211 		.nbytes_max = 4096,
212 		.flags = CE_ATTR_FLAGS,
213 		.reserved = 0,
214 	},
215 
216 	/* CE7 used only by Host */
217 };
218 
219 static bool ath10k_pci_irq_pending(struct ath10k *ar)
220 {
221 	u32 cause;
222 
223 	/* Check if the shared legacy irq is for us */
224 	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
225 				  PCIE_INTR_CAUSE_ADDRESS);
226 	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
227 		return true;
228 
229 	return false;
230 }
231 
232 static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
233 {
234 	/* IMPORTANT: INTR_CLR register has to be set after
235 	 * INTR_ENABLE is set to 0, otherwise the interrupt cannot be
236 	 * properly cleared. */
237 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
238 			   0);
239 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
240 			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
241 
242 	/* IMPORTANT: this extra read transaction is required to
243 	 * flush the posted write buffer. */
244 	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
245 				 PCIE_INTR_ENABLE_ADDRESS);
246 }
247 
248 static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
249 {
250 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
251 			   PCIE_INTR_ENABLE_ADDRESS,
252 			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
253 
254 	/* IMPORTANT: this extra read transaction is required to
255 	 * flush the posted write buffer. */
256 	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
257 				 PCIE_INTR_ENABLE_ADDRESS);
258 }
259 
260 static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
261 {
262 	struct ath10k *ar = arg;
263 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
264 
265 	if (ar_pci->num_msi_intrs == 0) {
266 		if (!ath10k_pci_irq_pending(ar))
267 			return IRQ_NONE;
268 
269 		ath10k_pci_disable_and_clear_legacy_irq(ar);
270 	}
271 
272 	tasklet_schedule(&ar_pci->early_irq_tasklet);
273 
274 	return IRQ_HANDLED;
275 }
276 
277 static int ath10k_pci_request_early_irq(struct ath10k *ar)
278 {
279 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
280 	int ret;
281 
282 	/* Regardless of whether MSI-X/MSI/legacy irqs have been set up, the
283 	 * first interrupt of the irq vector is triggered in all cases for FW
284 	 * indication/errors. */
285 	ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
286 			  IRQF_SHARED, "ath10k_pci (early)", ar);
287 	if (ret) {
288 		ath10k_warn("failed to request early irq: %d\n", ret);
289 		return ret;
290 	}
291 
292 	return 0;
293 }
294 
295 static void ath10k_pci_free_early_irq(struct ath10k *ar)
296 {
297 	free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
298 }
299 
300 /*
301  * Diagnostic read/write access is provided for startup/config/debug usage.
302  * Caller must guarantee proper alignment, when applicable, and single user
303  * at any moment.
304  */
305 static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
306 				    int nbytes)
307 {
308 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
309 	int ret = 0;
310 	u32 buf;
311 	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
312 	unsigned int id;
313 	unsigned int flags;
314 	struct ath10k_ce_pipe *ce_diag;
315 	/* Host buffer address in CE space */
316 	u32 ce_data;
317 	dma_addr_t ce_data_base = 0;
318 	void *data_buf = NULL;
319 	int i;
320 
321 	/*
322 	 * This code cannot handle reads to non-memory space. Redirect to the
323 	 * register read fn but preserve the multi-word read capability of
324 	 * this fn.
325 	 */
326 	if (address < DRAM_BASE_ADDRESS) {
327 		if (!IS_ALIGNED(address, 4) ||
328 		    !IS_ALIGNED((unsigned long)data, 4))
329 			return -EIO;
330 
331 		while ((nbytes >= 4) &&  ((ret = ath10k_pci_diag_read_access(
332 					   ar, address, (u32 *)data)) == 0)) {
333 			nbytes -= sizeof(u32);
334 			address += sizeof(u32);
335 			data += sizeof(u32);
336 		}
337 		return ret;
338 	}
339 
340 	ce_diag = ar_pci->ce_diag;
341 
342 	/*
343 	 * Allocate a temporary bounce buffer to hold caller's data
344 	 * to be DMA'ed from Target. This guarantees
345 	 *   1) 4-byte alignment
346 	 *   2) Buffer in DMA-able space
347 	 */
348 	orig_nbytes = nbytes;
349 	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
350 							 orig_nbytes,
351 							 &ce_data_base);
352 
353 	if (!data_buf) {
354 		ret = -ENOMEM;
355 		goto done;
356 	}
357 	memset(data_buf, 0, orig_nbytes);
358 
359 	remaining_bytes = orig_nbytes;
360 	ce_data = ce_data_base;
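	/* Transfer in chunks of at most DIAG_TRANSFER_LIMIT bytes: post the
	 * bounce buffer as the destination, program the CE to copy from the
	 * target address, then poll both completion rings. */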
361 	while (remaining_bytes) {
362 		nbytes = min_t(unsigned int, remaining_bytes,
363 			       DIAG_TRANSFER_LIMIT);
364 
365 		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
366 		if (ret != 0)
367 			goto done;
368 
369 		/* Request CE to send from Target(!) address to Host buffer */
370 		/*
371 		 * The address supplied by the caller is in the
372 		 * Target CPU virtual address space.
373 		 *
374 		 * In order to use this address with the diagnostic CE,
375 		 * convert it from Target CPU virtual address space
376 		 * to CE address space
377 		 */
378 		ath10k_pci_wake(ar);
379 		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
380 						     address);
381 		ath10k_pci_sleep(ar);
382 
383 		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
384 				 0);
385 		if (ret)
386 			goto done;
387 
388 		i = 0;
389 		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
390 						     &completed_nbytes,
391 						     &id) != 0) {
392 			mdelay(1);
393 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
394 				ret = -EBUSY;
395 				goto done;
396 			}
397 		}
398 
399 		if (nbytes != completed_nbytes) {
400 			ret = -EIO;
401 			goto done;
402 		}
403 
404 		if (buf != (u32) address) {
405 			ret = -EIO;
406 			goto done;
407 		}
408 
409 		i = 0;
410 		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
411 						     &completed_nbytes,
412 						     &id, &flags) != 0) {
413 			mdelay(1);
414 
415 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
416 				ret = -EBUSY;
417 				goto done;
418 			}
419 		}
420 
421 		if (nbytes != completed_nbytes) {
422 			ret = -EIO;
423 			goto done;
424 		}
425 
426 		if (buf != ce_data) {
427 			ret = -EIO;
428 			goto done;
429 		}
430 
431 		remaining_bytes -= nbytes;
432 		address += nbytes;
433 		ce_data += nbytes;
434 	}
435 
436 done:
437 	if (ret == 0) {
438 		/* Copy data from allocated DMA buf to caller's buf */
439 		WARN_ON_ONCE(orig_nbytes & 3);
440 		for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
441 			((u32 *)data)[i] =
442 				__le32_to_cpu(((__le32 *)data_buf)[i]);
443 		}
444 	} else
445 		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
446 			   __func__, address);
447 
448 	if (data_buf)
449 		pci_free_consistent(ar_pci->pdev, orig_nbytes,
450 				    data_buf, ce_data_base);
451 
452 	return ret;
453 }
454 
455 /* Read 4-byte aligned data from Target memory or register */
456 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
457 				       u32 *data)
458 {
459 	/* Assume range doesn't cross this boundary */
460 	if (address >= DRAM_BASE_ADDRESS)
461 		return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));
462 
463 	ath10k_pci_wake(ar);
464 	*data = ath10k_pci_read32(ar, address);
465 	ath10k_pci_sleep(ar);
466 	return 0;
467 }
468 
469 static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
470 				     const void *data, int nbytes)
471 {
472 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
473 	int ret = 0;
474 	u32 buf;
475 	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
476 	unsigned int id;
477 	unsigned int flags;
478 	struct ath10k_ce_pipe *ce_diag;
479 	void *data_buf = NULL;
480 	u32 ce_data;	/* Host buffer address in CE space */
481 	dma_addr_t ce_data_base = 0;
482 	int i;
483 
484 	ce_diag = ar_pci->ce_diag;
485 
486 	/*
487 	 * Allocate a temporary bounce buffer to hold caller's data
488 	 * to be DMA'ed to Target. This guarantees
489 	 *   1) 4-byte alignment
490 	 *   2) Buffer in DMA-able space
491 	 */
492 	orig_nbytes = nbytes;
493 	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
494 							 orig_nbytes,
495 							 &ce_data_base);
496 	if (!data_buf) {
497 		ret = -ENOMEM;
498 		goto done;
499 	}
500 
501 	/* Copy caller's data to allocated DMA buf */
502 	WARN_ON_ONCE(orig_nbytes & 3);
503 	for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
504 		((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
505 
506 	/*
507 	 * The address supplied by the caller is in the
508 	 * Target CPU virtual address space.
509 	 *
510 	 * In order to use this address with the diagnostic CE,
511 	 * convert it from
512 	 *    Target CPU virtual address space
513 	 * to
514 	 *    CE address space
515 	 */
516 	ath10k_pci_wake(ar);
517 	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
518 	ath10k_pci_sleep(ar);
519 
520 	remaining_bytes = orig_nbytes;
521 	ce_data = ce_data_base;
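	/* Mirror of the diag read path: each chunk posts the target address
	 * as the destination and sends from the host bounce buffer. */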
522 	while (remaining_bytes) {
523 		/* FIXME: check cast */
524 		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
525 
526 		/* Set up to receive directly into Target(!) address */
527 		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
528 		if (ret != 0)
529 			goto done;
530 
531 		/*
532 		 * Request CE to send caller-supplied data that
533 		 * was copied to bounce buffer to Target(!) address.
534 		 */
535 		ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
536 				     nbytes, 0, 0);
537 		if (ret != 0)
538 			goto done;
539 
540 		i = 0;
541 		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
542 						     &completed_nbytes,
543 						     &id) != 0) {
544 			mdelay(1);
545 
546 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
547 				ret = -EBUSY;
548 				goto done;
549 			}
550 		}
551 
552 		if (nbytes != completed_nbytes) {
553 			ret = -EIO;
554 			goto done;
555 		}
556 
557 		if (buf != ce_data) {
558 			ret = -EIO;
559 			goto done;
560 		}
561 
562 		i = 0;
563 		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
564 						     &completed_nbytes,
565 						     &id, &flags) != 0) {
566 			mdelay(1);
567 
568 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
569 				ret = -EBUSY;
570 				goto done;
571 			}
572 		}
573 
574 		if (nbytes != completed_nbytes) {
575 			ret = -EIO;
576 			goto done;
577 		}
578 
579 		if (buf != address) {
580 			ret = -EIO;
581 			goto done;
582 		}
583 
584 		remaining_bytes -= nbytes;
585 		address += nbytes;
586 		ce_data += nbytes;
587 	}
588 
589 done:
590 	if (data_buf) {
591 		pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
592 				    ce_data_base);
593 	}
594 
595 	if (ret != 0)
596 		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
597 			   address);
598 
599 	return ret;
600 }
601 
602 /* Write 4B data to Target memory or register */
603 static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
604 					u32 data)
605 {
606 	/* Assume range doesn't cross this boundary */
607 	if (address >= DRAM_BASE_ADDRESS)
608 		return ath10k_pci_diag_write_mem(ar, address, &data,
609 						 sizeof(u32));
610 
611 	ath10k_pci_wake(ar);
612 	ath10k_pci_write32(ar, address, data);
613 	ath10k_pci_sleep(ar);
614 	return 0;
615 }
616 
617 static bool ath10k_pci_target_is_awake(struct ath10k *ar)
618 {
619 	void __iomem *mem = ath10k_pci_priv(ar)->mem;
620 	u32 val;
621 	val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
622 		       RTC_STATE_ADDRESS);
623 	return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
624 }
625 
626 int ath10k_do_pci_wake(struct ath10k *ar)
627 {
628 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
629 	void __iomem *pci_addr = ar_pci->mem;
630 	int tot_delay = 0;
631 	int curr_delay = 5;
632 
633 	if (atomic_read(&ar_pci->keep_awake_count) == 0) {
634 		/* Force AWAKE */
635 		iowrite32(PCIE_SOC_WAKE_V_MASK,
636 			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
637 			  PCIE_SOC_WAKE_ADDRESS);
638 	}
639 	atomic_inc(&ar_pci->keep_awake_count);
640 
641 	if (ar_pci->verified_awake)
642 		return 0;
643 
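	/* Poll RTC_STATE with a linearly growing delay (5 us steps, capped at
	 * 50 us per iteration) until the SoC reports awake or
	 * PCIE_WAKE_TIMEOUT is exceeded. */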
644 	for (;;) {
645 		if (ath10k_pci_target_is_awake(ar)) {
646 			ar_pci->verified_awake = true;
647 			return 0;
648 		}
649 
650 		if (tot_delay > PCIE_WAKE_TIMEOUT) {
651 			ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
652 				    PCIE_WAKE_TIMEOUT,
653 				    atomic_read(&ar_pci->keep_awake_count));
654 			return -ETIMEDOUT;
655 		}
656 
657 		udelay(curr_delay);
658 		tot_delay += curr_delay;
659 
660 		if (curr_delay < 50)
661 			curr_delay += 5;
662 	}
663 }
664 
665 void ath10k_do_pci_sleep(struct ath10k *ar)
666 {
667 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
668 	void __iomem *pci_addr = ar_pci->mem;
669 
670 	if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
671 		/* Allow sleep */
672 		ar_pci->verified_awake = false;
673 		iowrite32(PCIE_SOC_WAKE_RESET,
674 			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
675 			  PCIE_SOC_WAKE_ADDRESS);
676 	}
677 }
678 
679 /* Called by lower (CE) layer when a send to Target completes. */
680 static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
681 {
682 	struct ath10k *ar = ce_state->ar;
683 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
684 	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
685 	void *transfer_context;
686 	u32 ce_data;
687 	unsigned int nbytes;
688 	unsigned int transfer_id;
689 
690 	while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
691 					     &ce_data, &nbytes,
692 					     &transfer_id) == 0) {
693 		/* no need to call tx completion for NULL pointers */
694 		if (transfer_context == NULL)
695 			continue;
696 
697 		cb->tx_completion(ar, transfer_context, transfer_id);
698 	}
699 }
700 
701 /* Called by lower (CE) layer when data is received from the Target. */
702 static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
703 {
704 	struct ath10k *ar = ce_state->ar;
705 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
706 	struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
707 	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
708 	struct sk_buff *skb;
709 	void *transfer_context;
710 	u32 ce_data;
711 	unsigned int nbytes, max_nbytes;
712 	unsigned int transfer_id;
713 	unsigned int flags;
714 	int err;
715 
716 	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
717 					     &ce_data, &nbytes, &transfer_id,
718 					     &flags) == 0) {
719 		err = ath10k_pci_post_rx_pipe(pipe_info, 1);
720 		if (unlikely(err)) {
721 			/* FIXME: retry */
722 			ath10k_warn("failed to replenish CE rx ring %d: %d\n",
723 				    pipe_info->pipe_num, err);
724 		}
725 
726 		skb = transfer_context;
727 		max_nbytes = skb->len + skb_tailroom(skb);
728 		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
729 				 max_nbytes, DMA_FROM_DEVICE);
730 
731 		if (unlikely(max_nbytes < nbytes)) {
732 			ath10k_warn("rxed more than expected (nbytes %d, max %d)\n",
733 				    nbytes, max_nbytes);
734 			dev_kfree_skb_any(skb);
735 			continue;
736 		}
737 
738 		skb_put(skb, nbytes);
739 		cb->rx_completion(ar, skb, pipe_info->pipe_num);
740 	}
741 }
742 
743 static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
744 				struct ath10k_hif_sg_item *items, int n_items)
745 {
746 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
747 	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
748 	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
749 	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
750 	unsigned int nentries_mask = src_ring->nentries_mask;
751 	unsigned int sw_index = src_ring->sw_index;
752 	unsigned int write_index = src_ring->write_index;
753 	int err, i;
754 
755 	spin_lock_bh(&ar_pci->ce_lock);
756 
757 	if (unlikely(CE_RING_DELTA(nentries_mask,
758 				   write_index, sw_index - 1) < n_items)) {
759 		err = -ENOBUFS;
760 		goto unlock;
761 	}
762 
763 	for (i = 0; i < n_items - 1; i++) {
764 		ath10k_dbg(ATH10K_DBG_PCI,
765 			   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
766 			   i, items[i].paddr, items[i].len, n_items);
767 		ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
768 				items[i].vaddr, items[i].len);
769 
770 		err = ath10k_ce_send_nolock(ce_pipe,
771 					    items[i].transfer_context,
772 					    items[i].paddr,
773 					    items[i].len,
774 					    items[i].transfer_id,
775 					    CE_SEND_FLAG_GATHER);
776 		if (err)
777 			goto unlock;
778 	}
779 
780 	/* `i` is equal to `n_items - 1` after the loop */
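	/* The last item is sent without CE_SEND_FLAG_GATHER, which closes the
	 * gather sequence and lets the CE process the whole batch. */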
781 
782 	ath10k_dbg(ATH10K_DBG_PCI,
783 		   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
784 		   i, items[i].paddr, items[i].len, n_items);
785 	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
786 			items[i].vaddr, items[i].len);
787 
788 	err = ath10k_ce_send_nolock(ce_pipe,
789 				    items[i].transfer_context,
790 				    items[i].paddr,
791 				    items[i].len,
792 				    items[i].transfer_id,
793 				    0);
794 	if (err)
795 		goto unlock;
796 
797 	err = 0;
798 unlock:
799 	spin_unlock_bh(&ar_pci->ce_lock);
800 	return err;
801 }
802 
803 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
804 {
805 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
806 	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
807 }
808 
809 static void ath10k_pci_hif_dump_area(struct ath10k *ar)
810 {
811 	u32 reg_dump_area = 0;
812 	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
813 	u32 host_addr;
814 	int ret;
815 	u32 i;
816 
817 	ath10k_err("firmware crashed!\n");
818 	ath10k_err("hardware name %s version 0x%x\n",
819 		   ar->hw_params.name, ar->target_version);
820 	ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);
821 
822 	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
823 	ret = ath10k_pci_diag_read_mem(ar, host_addr,
824 				       &reg_dump_area, sizeof(u32));
825 	if (ret) {
826 		ath10k_err("failed to read FW dump area address: %d\n", ret);
827 		return;
828 	}
829 
830 	ath10k_err("target register dump location: 0x%08X\n", reg_dump_area);
831 
832 	ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
833 				       &reg_dump_values[0],
834 				       REG_DUMP_COUNT_QCA988X * sizeof(u32));
835 	if (ret != 0) {
836 		ath10k_err("failed to read FW dump area: %d\n", ret);
837 		return;
838 	}
839 
840 	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
841 
842 	ath10k_err("target register dump\n");
843 	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
844 		ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
845 			   i,
846 			   reg_dump_values[i],
847 			   reg_dump_values[i + 1],
848 			   reg_dump_values[i + 2],
849 			   reg_dump_values[i + 3]);
850 
851 	queue_work(ar->workqueue, &ar->restart_work);
852 }
853 
854 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
855 					       int force)
856 {
857 	if (!force) {
858 		int resources;
859 		/*
860 		 * Decide whether to actually poll for completions, or just
861 		 * wait for a later chance.
862 		 * If there seem to be plenty of resources left, then just wait
863 		 * since checking involves reading a CE register, which is a
864 		 * relatively expensive operation.
865 		 */
866 		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
867 
868 		/*
869 		 * If at least 50% of the total resources are still available,
870 		 * don't bother checking again yet.
871 		 */
872 		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
873 			return;
874 	}
875 	ath10k_ce_per_engine_service(ar, pipe);
876 }
877 
878 static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
879 					 struct ath10k_hif_cb *callbacks)
880 {
881 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
882 
883 	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
884 
885 	memcpy(&ar_pci->msg_callbacks_current, callbacks,
886 	       sizeof(ar_pci->msg_callbacks_current));
887 }
888 
889 static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
890 {
891 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
892 	const struct ce_attr *attr;
893 	struct ath10k_pci_pipe *pipe_info;
894 	int pipe_num, disable_interrupts;
895 
896 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
897 		pipe_info = &ar_pci->pipe_info[pipe_num];
898 
899 		/* Handle Diagnostic CE specially */
900 		if (pipe_info->ce_hdl == ar_pci->ce_diag)
901 			continue;
902 
903 		attr = &host_ce_config_wlan[pipe_num];
904 
905 		if (attr->src_nentries) {
906 			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
907 			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
908 						   ath10k_pci_ce_send_done,
909 						   disable_interrupts);
910 		}
911 
912 		if (attr->dest_nentries)
913 			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
914 						   ath10k_pci_ce_recv_data);
915 	}
916 
917 	return 0;
918 }
919 
920 static void ath10k_pci_kill_tasklet(struct ath10k *ar)
921 {
922 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
923 	int i;
924 
925 	tasklet_kill(&ar_pci->intr_tq);
926 	tasklet_kill(&ar_pci->msi_fw_err);
927 	tasklet_kill(&ar_pci->early_irq_tasklet);
928 
929 	for (i = 0; i < CE_COUNT; i++)
930 		tasklet_kill(&ar_pci->pipe_info[i].intr);
931 }
932 
933 /* TODO - temporary mapping while we have too few CEs */
934 static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
935 					      u16 service_id, u8 *ul_pipe,
936 					      u8 *dl_pipe, int *ul_is_polled,
937 					      int *dl_is_polled)
938 {
939 	int ret = 0;
940 
941 	/* polling for received messages not supported */
942 	*dl_is_polled = 0;
943 
944 	switch (service_id) {
945 	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
946 		/*
947 		 * Host->target HTT gets its own pipe, so it can be polled
948 		 * while other pipes are interrupt driven.
949 		 */
950 		*ul_pipe = 4;
951 		/*
952 		 * Use the same target->host pipe for HTC ctrl, HTC raw
953 		 * streams, and HTT.
954 		 */
955 		*dl_pipe = 1;
956 		break;
957 
958 	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
959 	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
960 		/*
961 		 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
962 		 * HTC_CTRL_RSVD_SVC could share the same pipe as the
963 		 * WMI services.  So, if another CE is needed, change
964 		 * this to *ul_pipe = 3, which frees up CE 0.
965 		 */
966 		/* *ul_pipe = 3; */
967 		*ul_pipe = 0;
968 		*dl_pipe = 1;
969 		break;
970 
971 	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
972 	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
973 	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
974 	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
975 
976 	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
977 		*ul_pipe = 3;
978 		*dl_pipe = 2;
979 		break;
980 
981 		/* pipe 5 unused   */
982 		/* pipe 6 reserved */
983 		/* pipe 7 reserved */
984 
985 	default:
986 		ret = -1;
987 		break;
988 	}
989 	*ul_is_polled =
990 		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
991 
992 	return ret;
993 }
994 
995 static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
996 						u8 *ul_pipe, u8 *dl_pipe)
997 {
998 	int ul_is_polled, dl_is_polled;
999 
1000 	(void)ath10k_pci_hif_map_service_to_pipe(ar,
1001 						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1002 						 ul_pipe,
1003 						 dl_pipe,
1004 						 &ul_is_polled,
1005 						 &dl_is_polled);
1006 }
1007 
1008 static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
1009 				   int num)
1010 {
1011 	struct ath10k *ar = pipe_info->hif_ce_state;
1012 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1013 	struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
1014 	struct sk_buff *skb;
1015 	dma_addr_t ce_data;
1016 	int i, ret = 0;
1017 
1018 	if (pipe_info->buf_sz == 0)
1019 		return 0;
1020 
1021 	for (i = 0; i < num; i++) {
1022 		skb = dev_alloc_skb(pipe_info->buf_sz);
1023 		if (!skb) {
1024 			ath10k_warn("failed to allocate skbuff for pipe %d\n",
1025 				    pipe_info->pipe_num);
1026 			ret = -ENOMEM;
1027 			goto err;
1028 		}
1029 
1030 		WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
1031 
1032 		ce_data = dma_map_single(ar->dev, skb->data,
1033 					 skb->len + skb_tailroom(skb),
1034 					 DMA_FROM_DEVICE);
1035 
1036 		if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
1037 			ath10k_warn("failed to DMA map sk_buff\n");
1038 			dev_kfree_skb_any(skb);
1039 			ret = -EIO;
1040 			goto err;
1041 		}
1042 
1043 		ATH10K_SKB_CB(skb)->paddr = ce_data;
1044 
1045 		pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
1046 					       pipe_info->buf_sz,
1047 					       PCI_DMA_FROMDEVICE);
1048 
1049 		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
1050 						 ce_data);
1051 		if (ret) {
1052 			ath10k_warn("failed to enqueue to pipe %d: %d\n",
1053 				    pipe_info->pipe_num, ret);
1054 			goto err;
1055 		}
1056 	}
1057 
1058 	return ret;
1059 
1060 err:
1061 	ath10k_pci_rx_pipe_cleanup(pipe_info);
1062 	return ret;
1063 }
1064 
1065 static int ath10k_pci_post_rx(struct ath10k *ar)
1066 {
1067 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1068 	struct ath10k_pci_pipe *pipe_info;
1069 	const struct ce_attr *attr;
1070 	int pipe_num, ret = 0;
1071 
1072 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1073 		pipe_info = &ar_pci->pipe_info[pipe_num];
1074 		attr = &host_ce_config_wlan[pipe_num];
1075 
1076 		if (attr->dest_nentries == 0)
1077 			continue;
1078 
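		/* Post one buffer less than the ring size; CE rings keep one
		 * slot empty so a full ring can be told apart from an empty
		 * one. */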
1079 		ret = ath10k_pci_post_rx_pipe(pipe_info,
1080 					      attr->dest_nentries - 1);
1081 		if (ret) {
1082 			ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
1083 				    pipe_num, ret);
1084 
1085 			for (; pipe_num >= 0; pipe_num--) {
1086 				pipe_info = &ar_pci->pipe_info[pipe_num];
1087 				ath10k_pci_rx_pipe_cleanup(pipe_info);
1088 			}
1089 			return ret;
1090 		}
1091 	}
1092 
1093 	return 0;
1094 }
1095 
1096 static int ath10k_pci_hif_start(struct ath10k *ar)
1097 {
1098 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1099 	int ret, ret_early;
1100 
1101 	ath10k_pci_free_early_irq(ar);
1102 	ath10k_pci_kill_tasklet(ar);
1103 
1104 	ret = ath10k_pci_request_irq(ar);
1105 	if (ret) {
1106 		ath10k_warn("failed to request irqs: %d\n",
1107 			    ret);
1108 		goto err_early_irq;
1109 	}
1110 
1111 	ret = ath10k_pci_setup_ce_irq(ar);
1112 	if (ret) {
1113 		ath10k_warn("failed to setup CE interrupts: %d\n", ret);
1114 		goto err_stop;
1115 	}
1116 
1117 	/* Post buffers once to start things off. */
1118 	ret = ath10k_pci_post_rx(ar);
1119 	if (ret) {
1120 		ath10k_warn("failed to post RX buffers for all pipes: %d\n",
1121 			    ret);
1122 		goto err_stop;
1123 	}
1124 
1125 	ar_pci->started = 1;
1126 	return 0;
1127 
1128 err_stop:
1129 	ath10k_ce_disable_interrupts(ar);
1130 	ath10k_pci_free_irq(ar);
1131 	ath10k_pci_kill_tasklet(ar);
1132 err_early_irq:
1133 	/* Though there should be no interrupts (device was reset),
1134 	 * power_down() expects the early IRQ to be installed as per the
1135 	 * driver lifecycle. */
1136 	ret_early = ath10k_pci_request_early_irq(ar);
1137 	if (ret_early)
1138 		ath10k_warn("failed to re-enable early irq: %d\n", ret_early);
1139 
1140 	return ret;
1141 }
1142 
1143 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1144 {
1145 	struct ath10k *ar;
1146 	struct ath10k_pci *ar_pci;
1147 	struct ath10k_ce_pipe *ce_hdl;
1148 	u32 buf_sz;
1149 	struct sk_buff *netbuf;
1150 	u32 ce_data;
1151 
1152 	buf_sz = pipe_info->buf_sz;
1153 
1154 	/* Unused Copy Engine */
1155 	if (buf_sz == 0)
1156 		return;
1157 
1158 	ar = pipe_info->hif_ce_state;
1159 	ar_pci = ath10k_pci_priv(ar);
1160 
1161 	if (!ar_pci->started)
1162 		return;
1163 
1164 	ce_hdl = pipe_info->ce_hdl;
1165 
1166 	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
1167 					  &ce_data) == 0) {
1168 		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
1169 				 netbuf->len + skb_tailroom(netbuf),
1170 				 DMA_FROM_DEVICE);
1171 		dev_kfree_skb_any(netbuf);
1172 	}
1173 }
1174 
1175 static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1176 {
1177 	struct ath10k *ar;
1178 	struct ath10k_pci *ar_pci;
1179 	struct ath10k_ce_pipe *ce_hdl;
1180 	struct sk_buff *netbuf;
1181 	u32 ce_data;
1182 	unsigned int nbytes;
1183 	unsigned int id;
1184 	u32 buf_sz;
1185 
1186 	buf_sz = pipe_info->buf_sz;
1187 
1188 	/* Unused Copy Engine */
1189 	if (buf_sz == 0)
1190 		return;
1191 
1192 	ar = pipe_info->hif_ce_state;
1193 	ar_pci = ath10k_pci_priv(ar);
1194 
1195 	if (!ar_pci->started)
1196 		return;
1197 
1198 	ce_hdl = pipe_info->ce_hdl;
1199 
1200 	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
1201 					  &ce_data, &nbytes, &id) == 0) {
1202 		/* no need to call tx completion for NULL pointers */
1203 		if (!netbuf)
1204 			continue;
1205 
1206 		ar_pci->msg_callbacks_current.tx_completion(ar,
1207 							    netbuf,
1208 							    id);
1209 	}
1210 }
1211 
1212 /*
1213  * Cleanup residual buffers for device shutdown:
1214  *    buffers that were enqueued for receive
1215  *    buffers that were to be sent
1216  * Note: Buffers that had completed but which were
1217  * not yet processed are on a completion queue. They
1218  * are handled when the completion thread shuts down.
1219  */
1220 static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1221 {
1222 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1223 	int pipe_num;
1224 
1225 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1226 		struct ath10k_pci_pipe *pipe_info;
1227 
1228 		pipe_info = &ar_pci->pipe_info[pipe_num];
1229 		ath10k_pci_rx_pipe_cleanup(pipe_info);
1230 		ath10k_pci_tx_pipe_cleanup(pipe_info);
1231 	}
1232 }
1233 
1234 static void ath10k_pci_ce_deinit(struct ath10k *ar)
1235 {
1236 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1237 	struct ath10k_pci_pipe *pipe_info;
1238 	int pipe_num;
1239 
1240 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1241 		pipe_info = &ar_pci->pipe_info[pipe_num];
1242 		if (pipe_info->ce_hdl) {
1243 			ath10k_ce_deinit(pipe_info->ce_hdl);
1244 			pipe_info->ce_hdl = NULL;
1245 			pipe_info->buf_sz = 0;
1246 		}
1247 	}
1248 }
1249 
1250 static void ath10k_pci_hif_stop(struct ath10k *ar)
1251 {
1252 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1253 	int ret;
1254 
1255 	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
1256 
1257 	ret = ath10k_ce_disable_interrupts(ar);
1258 	if (ret)
1259 		ath10k_warn("failed to disable CE interrupts: %d\n", ret);
1260 
1261 	ath10k_pci_free_irq(ar);
1262 	ath10k_pci_kill_tasklet(ar);
1263 
1264 	ret = ath10k_pci_request_early_irq(ar);
1265 	if (ret)
1266 		ath10k_warn("failed to re-enable early irq: %d\n", ret);
1267 
1268 	/* At this point, asynchronous threads are stopped, the target should
1269 	 * neither DMA nor interrupt. We process the leftovers and then free
1270 	 * everything else up. */
1271 
1272 	ath10k_pci_buffer_cleanup(ar);
1273 
1274 	/* Make sure the device won't access any structures on the host by
1275 	 * resetting it. The device was fed with PCI CE ringbuffer
1276 	 * configuration during init. If ringbuffers are freed and the device
1277 	 * were to access them this could lead to memory corruption on the
1278 	 * host. */
1279 	ath10k_pci_warm_reset(ar);
1280 
1281 	ar_pci->started = 0;
1282 }
1283 
1284 static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1285 					   void *req, u32 req_len,
1286 					   void *resp, u32 *resp_len)
1287 {
1288 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1289 	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1290 	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1291 	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1292 	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
1293 	dma_addr_t req_paddr = 0;
1294 	dma_addr_t resp_paddr = 0;
1295 	struct bmi_xfer xfer = {};
1296 	void *treq, *tresp = NULL;
1297 	int ret = 0;
1298 
1299 	might_sleep();
1300 
1301 	if (resp && !resp_len)
1302 		return -EINVAL;
1303 
1304 	if (resp && resp_len && *resp_len == 0)
1305 		return -EINVAL;
1306 
1307 	treq = kmemdup(req, req_len, GFP_KERNEL);
1308 	if (!treq)
1309 		return -ENOMEM;
1310 
1311 	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1312 	ret = dma_mapping_error(ar->dev, req_paddr);
1313 	if (ret)
1314 		goto err_dma;
1315 
1316 	if (resp && resp_len) {
1317 		tresp = kzalloc(*resp_len, GFP_KERNEL);
1318 		if (!tresp) {
1319 			ret = -ENOMEM;
1320 			goto err_req;
1321 		}
1322 
1323 		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1324 					    DMA_FROM_DEVICE);
1325 		ret = dma_mapping_error(ar->dev, resp_paddr);
1326 		if (ret)
1327 			goto err_req;
1328 
1329 		xfer.wait_for_resp = true;
1330 		xfer.resp_len = 0;
1331 
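		/* Post the response buffer before sending the request so the
		 * target's reply can never arrive with no RX buffer posted. */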
1332 		ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
1333 	}
1334 
1335 	init_completion(&xfer.done);
1336 
1337 	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1338 	if (ret)
1339 		goto err_resp;
1340 
1341 	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
1342 	if (ret) {
1343 		u32 unused_buffer;
1344 		unsigned int unused_nbytes;
1345 		unsigned int unused_id;
1346 
1347 		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1348 					   &unused_nbytes, &unused_id);
1349 	} else {
1350 		/* the transfer completed within the timeout */
1351 		ret = 0;
1352 	}
1353 
1354 err_resp:
1355 	if (resp) {
1356 		u32 unused_buffer;
1357 
1358 		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1359 		dma_unmap_single(ar->dev, resp_paddr,
1360 				 *resp_len, DMA_FROM_DEVICE);
1361 	}
1362 err_req:
1363 	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1364 
1365 	if (ret == 0 && resp_len) {
1366 		*resp_len = min(*resp_len, xfer.resp_len);
1367 		memcpy(resp, tresp, xfer.resp_len);
1368 	}
1369 err_dma:
1370 	kfree(treq);
1371 	kfree(tresp);
1372 
1373 	return ret;
1374 }
1375 
1376 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
1377 {
1378 	struct bmi_xfer *xfer;
1379 	u32 ce_data;
1380 	unsigned int nbytes;
1381 	unsigned int transfer_id;
1382 
1383 	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1384 					  &nbytes, &transfer_id))
1385 		return;
1386 
1387 	if (xfer->wait_for_resp)
1388 		return;
1389 
1390 	complete(&xfer->done);
1391 }
1392 
1393 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1394 {
1395 	struct bmi_xfer *xfer;
1396 	u32 ce_data;
1397 	unsigned int nbytes;
1398 	unsigned int transfer_id;
1399 	unsigned int flags;
1400 
1401 	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1402 					  &nbytes, &transfer_id, &flags))
1403 		return;
1404 
1405 	if (!xfer->wait_for_resp) {
1406 		ath10k_warn("unexpected: BMI data received; ignoring\n");
1407 		return;
1408 	}
1409 
1410 	xfer->resp_len = nbytes;
1411 	complete(&xfer->done);
1412 }
1413 
1414 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1415 			       struct ath10k_ce_pipe *rx_pipe,
1416 			       struct bmi_xfer *xfer)
1417 {
1418 	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1419 
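	/* BMI runs before CE interrupts are wired up, so poll both pipes
	 * cooperatively (schedule() yields the CPU) until the transfer
	 * completes or the timeout expires. */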
1420 	while (time_before_eq(jiffies, timeout)) {
1421 		ath10k_pci_bmi_send_done(tx_pipe);
1422 		ath10k_pci_bmi_recv_data(rx_pipe);
1423 
1424 		if (completion_done(&xfer->done))
1425 			return 0;
1426 
1427 		schedule();
1428 	}
1429 
1430 	return -ETIMEDOUT;
1431 }
1432 
1433 /*
1434  * Map from service/endpoint to Copy Engine.
1435  * This table is derived from the CE_PCI TABLE, above.
1436  * It is passed to the Target at startup for use by firmware.
1437  */
1438 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1439 	{
1440 		 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1441 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1442 		 3,
1443 	},
1444 	{
1445 		 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1446 		 PIPEDIR_IN,		/* in = DL = target -> host */
1447 		 2,
1448 	},
1449 	{
1450 		 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1451 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1452 		 3,
1453 	},
1454 	{
1455 		 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1456 		 PIPEDIR_IN,		/* in = DL = target -> host */
1457 		 2,
1458 	},
1459 	{
1460 		 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1461 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1462 		 3,
1463 	},
1464 	{
1465 		 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1466 		 PIPEDIR_IN,		/* in = DL = target -> host */
1467 		 2,
1468 	},
1469 	{
1470 		 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1471 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1472 		 3,
1473 	},
1474 	{
1475 		 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1476 		 PIPEDIR_IN,		/* in = DL = target -> host */
1477 		 2,
1478 	},
1479 	{
1480 		 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1481 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1482 		 3,
1483 	},
1484 	{
1485 		 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1486 		 PIPEDIR_IN,		/* in = DL = target -> host */
1487 		 2,
1488 	},
1489 	{
1490 		 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1491 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1492 		 0,		/* could be moved to 3 (share with WMI) */
1493 	},
1494 	{
1495 		 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1496 		 PIPEDIR_IN,		/* in = DL = target -> host */
1497 		 1,
1498 	},
1499 	{
1500 		 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
1501 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1502 		 0,
1503 	},
1504 	{
1505 		 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
1506 		 PIPEDIR_IN,		/* in = DL = target -> host */
1507 		 1,
1508 	},
1509 	{
1510 		 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1511 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1512 		 4,
1513 	},
1514 	{
1515 		 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1516 		 PIPEDIR_IN,		/* in = DL = target -> host */
1517 		 1,
1518 	},
1519 
1520 	/* (Additions here) */
1521 
1522 	{				/* Must be last */
1523 		 0,
1524 		 0,
1525 		 0,
1526 	},
1527 };
1528 
1529 /*
1530  * Send an interrupt to the device to wake up the Target CPU
1531  * so it has an opportunity to notice any changed state.
1532  */
1533 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1534 {
1535 	int ret;
1536 	u32 core_ctrl;
1537 
1538 	ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1539 					      CORE_CTRL_ADDRESS,
1540 					  &core_ctrl);
1541 	if (ret) {
1542 		ath10k_warn("failed to read core_ctrl: %d\n", ret);
1543 		return ret;
1544 	}
1545 
1546 	/* A_INUM_FIRMWARE interrupt to Target CPU */
1547 	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1548 
1549 	ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1550 					       CORE_CTRL_ADDRESS,
1551 					   core_ctrl);
1552 	if (ret) {
1553 		ath10k_warn("failed to set target CPU interrupt mask: %d\n",
1554 			    ret);
1555 		return ret;
1556 	}
1557 
1558 	return 0;
1559 }
1560 
1561 static int ath10k_pci_init_config(struct ath10k *ar)
1562 {
1563 	u32 interconnect_targ_addr;
1564 	u32 pcie_state_targ_addr = 0;
1565 	u32 pipe_cfg_targ_addr = 0;
1566 	u32 svc_to_pipe_map = 0;
1567 	u32 pcie_config_flags = 0;
1568 	u32 ealloc_value;
1569 	u32 ealloc_targ_addr;
1570 	u32 flag2_value;
1571 	u32 flag2_targ_addr;
1572 	int ret = 0;
1573 
1574 	/* Download to Target the CE Config and the service-to-CE map */
1575 	interconnect_targ_addr =
1576 		host_interest_item_address(HI_ITEM(hi_interconnect_state));
1577 
1578 	/* Supply Target-side CE configuration */
1579 	ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1580 					  &pcie_state_targ_addr);
1581 	if (ret != 0) {
1582 		ath10k_err("Failed to get pcie state addr: %d\n", ret);
1583 		return ret;
1584 	}
1585 
1586 	if (pcie_state_targ_addr == 0) {
1587 		ret = -EIO;
1588 		ath10k_err("Invalid pcie state addr\n");
1589 		return ret;
1590 	}
1591 
1592 	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1593 					  offsetof(struct pcie_state,
1594 						   pipe_cfg_addr),
1595 					  &pipe_cfg_targ_addr);
1596 	if (ret != 0) {
1597 		ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1598 		return ret;
1599 	}
1600 
1601 	if (pipe_cfg_targ_addr == 0) {
1602 		ret = -EIO;
1603 		ath10k_err("Invalid pipe cfg addr\n");
1604 		return ret;
1605 	}
1606 
1607 	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1608 				 target_ce_config_wlan,
1609 				 sizeof(target_ce_config_wlan));
1610 
1611 	if (ret != 0) {
1612 		ath10k_err("Failed to write pipe cfg: %d\n", ret);
1613 		return ret;
1614 	}
1615 
1616 	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1617 					  offsetof(struct pcie_state,
1618 						   svc_to_pipe_map),
1619 					  &svc_to_pipe_map);
1620 	if (ret != 0) {
1621 		ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1622 		return ret;
1623 	}
1624 
1625 	if (svc_to_pipe_map == 0) {
1626 		ret = -EIO;
1627 		ath10k_err("Invalid svc_to_pipe map\n");
1628 		return ret;
1629 	}
1630 
1631 	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1632 				 target_service_to_ce_map_wlan,
1633 				 sizeof(target_service_to_ce_map_wlan));
1634 	if (ret != 0) {
1635 		ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1636 		return ret;
1637 	}
1638 
1639 	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1640 					  offsetof(struct pcie_state,
1641 						   config_flags),
1642 					  &pcie_config_flags);
1643 	if (ret != 0) {
1644 		ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1645 		return ret;
1646 	}
1647 
1648 	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1649 
1650 	ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1651 				 offsetof(struct pcie_state, config_flags),
1652 				 &pcie_config_flags,
1653 				 sizeof(pcie_config_flags));
1654 	if (ret != 0) {
1655 		ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1656 		return ret;
1657 	}
1658 
1659 	/* configure early allocation */
1660 	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1661 
1662 	ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1663 	if (ret != 0) {
1664 		ath10k_err("Failed to get early alloc val: %d\n", ret);
1665 		return ret;
1666 	}
1667 
1668 	/* first bank is switched to IRAM */
1669 	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1670 			 HI_EARLY_ALLOC_MAGIC_MASK);
1671 	ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1672 			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1673 
1674 	ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1675 	if (ret != 0) {
1676 		ath10k_err("Failed to set early alloc val: %d\n", ret);
1677 		return ret;
1678 	}
1679 
1680 	/* Tell Target to proceed with initialization */
1681 	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1682 
1683 	ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1684 	if (ret != 0) {
1685 		ath10k_err("Failed to get option val: %d\n", ret);
1686 		return ret;
1687 	}
1688 
1689 	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1690 
1691 	ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1692 	if (ret != 0) {
1693 		ath10k_err("Failed to set option val: %d\n", ret);
1694 		return ret;
1695 	}
1696 
1697 	return 0;
1698 }
1699 
1700 
1701 
1702 static int ath10k_pci_ce_init(struct ath10k *ar)
1703 {
1704 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1705 	struct ath10k_pci_pipe *pipe_info;
1706 	const struct ce_attr *attr;
1707 	int pipe_num;
1708 
1709 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1710 		pipe_info = &ar_pci->pipe_info[pipe_num];
1711 		pipe_info->pipe_num = pipe_num;
1712 		pipe_info->hif_ce_state = ar;
1713 		attr = &host_ce_config_wlan[pipe_num];
1714 
1715 		pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1716 		if (pipe_info->ce_hdl == NULL) {
1717 			ath10k_err("failed to initialize CE for pipe: %d\n",
1718 				   pipe_num);
1719 
1720 			/* It is safe to call it here. It checks if ce_hdl is
1721 			 * valid for each pipe */
1722 			ath10k_pci_ce_deinit(ar);
1723 			return -1;
1724 		}
1725 
1726 		if (pipe_num == CE_COUNT - 1) {
1727 			/*
1728 			 * Reserve the ultimate CE for
1729 			 * diagnostic Window support
1730 			 */
1731 			ar_pci->ce_diag = pipe_info->ce_hdl;
1732 			continue;
1733 		}
1734 
1735 		pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1736 	}
1737 
1738 	return 0;
1739 }
1740 
1741 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1742 {
1743 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1744 	u32 fw_indicator_address, fw_indicator;
1745 
1746 	ath10k_pci_wake(ar);
1747 
1748 	fw_indicator_address = ar_pci->fw_indicator_address;
1749 	fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1750 
1751 	if (fw_indicator & FW_IND_EVENT_PENDING) {
1752 		/* ACK: clear Target-side pending event */
1753 		ath10k_pci_write32(ar, fw_indicator_address,
1754 				   fw_indicator & ~FW_IND_EVENT_PENDING);
1755 
1756 		if (ar_pci->started) {
1757 			ath10k_pci_hif_dump_area(ar);
1758 		} else {
1759 			/*
1760 			 * Probable Target failure before we're prepared
1761 			 * to handle it.  Generally unexpected.
1762 			 */
1763 			ath10k_warn("early firmware event indicated\n");
1764 		}
1765 	}
1766 
1767 	ath10k_pci_sleep(ar);
1768 }
1769 
1770 static int ath10k_pci_warm_reset(struct ath10k *ar)
1771 {
1772 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1773 	int ret = 0;
1774 	u32 val;
1775 
1776 	ath10k_dbg(ATH10K_DBG_BOOT, "boot performing warm chip reset\n");
1777 
1778 	ret = ath10k_do_pci_wake(ar);
1779 	if (ret) {
1780 		ath10k_err("failed to wake up target: %d\n", ret);
1781 		return ret;
1782 	}
1783 
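	/* Sequence: quiesce and clear pending interrupts, clear the FW
	 * indicator, pulse the CE reset bit, then warm-reset the target CPU.
	 * The PCIe link itself stays up throughout. */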
1784 	/* debug */
1785 	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1786 				PCIE_INTR_CAUSE_ADDRESS);
1787 	ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
1788 
1789 	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1790 				CPU_INTR_ADDRESS);
1791 	ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
1792 		   val);
1793 
1794 	/* disable pending irqs */
1795 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1796 			   PCIE_INTR_ENABLE_ADDRESS, 0);
1797 
1798 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1799 			   PCIE_INTR_CLR_ADDRESS, ~0);
1800 
1801 	msleep(100);
1802 
1803 	/* clear fw indicator */
1804 	ath10k_pci_write32(ar, ar_pci->fw_indicator_address, 0);
1805 
1806 	/* clear target LF timer interrupts */
1807 	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1808 				SOC_LF_TIMER_CONTROL0_ADDRESS);
1809 	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
1810 			   SOC_LF_TIMER_CONTROL0_ADDRESS,
1811 			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
1812 
1813 	/* reset CE */
1814 	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1815 				SOC_RESET_CONTROL_ADDRESS);
1816 	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1817 			   val | SOC_RESET_CONTROL_CE_RST_MASK);
1818 	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1819 				SOC_RESET_CONTROL_ADDRESS);
1820 	msleep(10);
1821 
1822 	/* unreset CE */
1823 	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1824 			   val & ~SOC_RESET_CONTROL_CE_RST_MASK);
1825 	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1826 				SOC_RESET_CONTROL_ADDRESS);
1827 	msleep(10);
1828 
1829 	/* debug */
1830 	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1831 				PCIE_INTR_CAUSE_ADDRESS);
1832 	ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
1833 
1834 	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1835 				CPU_INTR_ADDRESS);
1836 	ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
1837 		   val);
1838 
1839 	/* CPU warm reset */
1840 	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1841 				SOC_RESET_CONTROL_ADDRESS);
1842 	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1843 			   val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
1844 
1845 	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1846 				SOC_RESET_CONTROL_ADDRESS);
1847 	ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);
1848 
1849 	msleep(100);
1850 
1851 	ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");
1852 
1853 	ath10k_do_pci_sleep(ar);
1854 	return ret;
1855 }
1856 
1857 static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
1858 {
1859 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1860 	const char *irq_mode;
1861 	int ret;
1862 
1863 	/*
1864 	 * Bring the target up cleanly.
1865 	 *
1866 	 * The target may be in an undefined state with an AUX-powered Target
1867 	 * and a Host in WoW mode. If the Host crashes, loses power, or is
1868 	 * restarted (without unloading the driver) then the Target is left
1869 	 * (aux) powered and running. On a subsequent driver load, the Target
1870 	 * is in an unexpected state. We try to catch that here in order to
1871 	 * reset the Target and retry the probe.
1872 	 */
1873 	if (cold_reset)
1874 		ret = ath10k_pci_cold_reset(ar);
1875 	else
1876 		ret = ath10k_pci_warm_reset(ar);
1877 
1878 	if (ret) {
1879 		ath10k_err("failed to reset target: %d\n", ret);
1880 		goto err;
1881 	}
1882 
1883 	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1884 		/* Force AWAKE forever */
1885 		ath10k_do_pci_wake(ar);
1886 
1887 	ret = ath10k_pci_ce_init(ar);
1888 	if (ret) {
1889 		ath10k_err("failed to initialize CE: %d\n", ret);
1890 		goto err_ps;
1891 	}
1892 
1893 	ret = ath10k_ce_disable_interrupts(ar);
1894 	if (ret) {
1895 		ath10k_err("failed to disable CE interrupts: %d\n", ret);
1896 		goto err_ce;
1897 	}
1898 
1899 	ret = ath10k_pci_init_irq(ar);
1900 	if (ret) {
1901 		ath10k_err("failed to init irqs: %d\n", ret);
1902 		goto err_ce;
1903 	}
1904 
1905 	ret = ath10k_pci_request_early_irq(ar);
1906 	if (ret) {
1907 		ath10k_err("failed to request early irq: %d\n", ret);
1908 		goto err_deinit_irq;
1909 	}
1910 
1911 	ret = ath10k_pci_wait_for_target_init(ar);
1912 	if (ret) {
1913 		ath10k_err("failed to wait for target to init: %d\n", ret);
1914 		goto err_free_early_irq;
1915 	}
1916 
1917 	ret = ath10k_pci_init_config(ar);
1918 	if (ret) {
1919 		ath10k_err("failed to setup init config: %d\n", ret);
1920 		goto err_free_early_irq;
1921 	}
1922 
1923 	ret = ath10k_pci_wake_target_cpu(ar);
1924 	if (ret) {
1925 		ath10k_err("could not wake up target CPU: %d\n", ret);
1926 		goto err_free_early_irq;
1927 	}
1928 
1929 	if (ar_pci->num_msi_intrs > 1)
1930 		irq_mode = "MSI-X";
1931 	else if (ar_pci->num_msi_intrs == 1)
1932 		irq_mode = "MSI";
1933 	else
1934 		irq_mode = "legacy";
1935 
1936 	if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
1937 		ath10k_info("pci irq %s\n", irq_mode);
1938 
1939 	return 0;
1940 
1941 err_free_early_irq:
1942 	ath10k_pci_free_early_irq(ar);
1943 err_deinit_irq:
1944 	ath10k_pci_deinit_irq(ar);
1945 err_ce:
1946 	ath10k_pci_ce_deinit(ar);
1947 	ath10k_pci_warm_reset(ar);
1948 err_ps:
1949 	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1950 		ath10k_do_pci_sleep(ar);
1951 err:
1952 	return ret;
1953 }
1954 
1955 static int ath10k_pci_hif_power_up(struct ath10k *ar)
1956 {
1957 	int ret;
1958 
1959 	/*
1960 	 * Hardware CUS232 version 2 has some issues with cold reset and the
1961 	 * preferred (and safer) way to perform a device reset is through a
1962 	 * warm reset.
1963 	 *
1964 	 * Warm reset doesn't always work though (notably after a firmware
1965 	 * crash) so fall back to cold reset if necessary.
1966 	 */
1967 	ret = __ath10k_pci_hif_power_up(ar, false);
1968 	if (ret) {
1969 		ath10k_warn("failed to power up target using warm reset (%d), trying cold reset\n",
1970 			    ret);
1971 
1972 		ret = __ath10k_pci_hif_power_up(ar, true);
1973 		if (ret) {
1974 			ath10k_err("failed to power up target using cold reset too (%d)\n",
1975 				   ret);
1976 			return ret;
1977 		}
1978 	}
1979 
1980 	return 0;
1981 }
1982 
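/* Power down mirrors __ath10k_pci_hif_power_up() in reverse: release the
 * early irq, stop pending tasklets and irqs, warm reset the target and
 * only then tear down the copy engines. */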
1983 static void ath10k_pci_hif_power_down(struct ath10k *ar)
1984 {
1985 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1986 
1987 	ath10k_pci_free_early_irq(ar);
1988 	ath10k_pci_kill_tasklet(ar);
1989 	ath10k_pci_deinit_irq(ar);
1990 	ath10k_pci_warm_reset(ar);
1991 
1992 	ath10k_pci_ce_deinit(ar);
1993 	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1994 		ath10k_do_pci_sleep(ar);
1995 }
1996 
1997 #ifdef CONFIG_PM
1998 
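/* Config space offset of the PCI power management control/status register
 * (PMCSR). Assumption: the PM capability sits at offset 0x40 on QCA988X,
 * so PMCSR lands at 0x44. Its two low bits select the device power state
 * (0x0 == D0, 0x3 == D3hot), which is what suspend/resume toggle below. */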
1999 #define ATH10K_PCI_PM_CONTROL 0x44
2000 
2001 static int ath10k_pci_hif_suspend(struct ath10k *ar)
2002 {
2003 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2004 	struct pci_dev *pdev = ar_pci->pdev;
2005 	u32 val;
2006 
2007 	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2008 
2009 	if ((val & 0x000000ff) != 0x3) {
2010 		pci_save_state(pdev);
2011 		pci_disable_device(pdev);
2012 		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2013 				       (val & 0xffffff00) | 0x03);
2014 	}
2015 
2016 	return 0;
2017 }
2018 
2019 static int ath10k_pci_hif_resume(struct ath10k *ar)
2020 {
2021 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2022 	struct pci_dev *pdev = ar_pci->pdev;
2023 	u32 val;
2024 
2025 	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2026 
2027 	if ((val & 0x000000ff) != 0) {
2028 		pci_restore_state(pdev);
2029 		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2030 				       val & 0xffffff00);
2031 		/*
2032 		 * Suspend/Resume resets the PCI configuration space,
2033 		 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
2034 		 * to keep PCI Tx retries from interfering with C3 CPU state
2035 		 */
2036 		pci_read_config_dword(pdev, 0x40, &val);
2037 
2038 		if ((val & 0x0000ff00) != 0)
2039 			pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2040 	}
2041 
2042 	return 0;
2043 }
2044 #endif
2045 
2046 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2047 	.tx_sg			= ath10k_pci_hif_tx_sg,
2048 	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
2049 	.start			= ath10k_pci_hif_start,
2050 	.stop			= ath10k_pci_hif_stop,
2051 	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
2052 	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
2053 	.send_complete_check	= ath10k_pci_hif_send_complete_check,
2054 	.set_callbacks		= ath10k_pci_hif_set_callbacks,
2055 	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
2056 	.power_up		= ath10k_pci_hif_power_up,
2057 	.power_down		= ath10k_pci_hif_power_down,
2058 #ifdef CONFIG_PM
2059 	.suspend		= ath10k_pci_hif_suspend,
2060 	.resume			= ath10k_pci_hif_resume,
2061 #endif
2062 };
2063 
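/* Per-copy-engine bottom half: services completions on a single CE ring.
 * Scheduled from ath10k_pci_per_engine_handler() when each CE has its own
 * MSI vector. */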
2064 static void ath10k_pci_ce_tasklet(unsigned long ptr)
2065 {
2066 	struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
2067 	struct ath10k_pci *ar_pci = pipe->ar_pci;
2068 
2069 	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2070 }
2071 
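/* Bottom half for the dedicated firmware-error MSI vector; forwards the
 * indication to the common firmware interrupt handler. */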
2072 static void ath10k_msi_err_tasklet(unsigned long data)
2073 {
2074 	struct ath10k *ar = (struct ath10k *)data;
2075 
2076 	ath10k_pci_fw_interrupt_handler(ar);
2077 }
2078 
2079 /*
2080  * Handler for a per-engine interrupt on a PARTICULAR CE.
2081  * This is used in cases where each CE has a private MSI interrupt.
2082  */
2083 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2084 {
2085 	struct ath10k *ar = arg;
2086 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2087 	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2088 
2089 	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
2090 		ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
2091 		return IRQ_HANDLED;
2092 	}
2093 
2094 	/*
2095 	 * NOTE: We are able to derive ce_id from irq because we
2096 	 * use a one-to-one mapping for CEs 0..5.
2097 	 * CEs 6 & 7 do not use interrupts at all.
2098 	 *
2099 	 * This mapping must be kept in sync with the mapping
2100 	 * used by firmware.
2101 	 */
2102 	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2103 	return IRQ_HANDLED;
2104 }
2105 
2106 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2107 {
2108 	struct ath10k *ar = arg;
2109 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2110 
2111 	tasklet_schedule(&ar_pci->msi_fw_err);
2112 	return IRQ_HANDLED;
2113 }
2114 
2115 /*
2116  * Top-level interrupt handler for all PCI interrupts from a Target.
2117  * When a block of MSI interrupts is allocated, this top-level handler
2118  * is not used; instead, we directly call the correct sub-handler.
2119  */
2120 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2121 {
2122 	struct ath10k *ar = arg;
2123 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2124 
2125 	if (ar_pci->num_msi_intrs == 0) {
2126 		if (!ath10k_pci_irq_pending(ar))
2127 			return IRQ_NONE;
2128 
2129 		ath10k_pci_disable_and_clear_legacy_irq(ar);
2130 	}
2131 
2132 	tasklet_schedule(&ar_pci->intr_tq);
2133 
2134 	return IRQ_HANDLED;
2135 }
2136 
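/* Early irq bottom half, used while the target is still booting (between
 * reset and full interrupt setup): checks the firmware indicator register
 * for a crash and re-arms the legacy interrupt when done. */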
2137 static void ath10k_pci_early_irq_tasklet(unsigned long data)
2138 {
2139 	struct ath10k *ar = (struct ath10k *)data;
2140 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2141 	u32 fw_ind;
2142 	int ret;
2143 
2144 	ret = ath10k_pci_wake(ar);
2145 	if (ret) {
2146 		ath10k_warn("failed to wake target in early irq tasklet: %d\n",
2147 			    ret);
2148 		return;
2149 	}
2150 
2151 	fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address);
2152 	if (fw_ind & FW_IND_EVENT_PENDING) {
2153 		ath10k_pci_write32(ar, ar_pci->fw_indicator_address,
2154 				   fw_ind & ~FW_IND_EVENT_PENDING);
2155 
2156 		/* Some structures are unavailable during early boot or at
2157 		 * driver teardown, so just print that the device has crashed. */
2158 		ath10k_warn("device crashed - no diagnostics available\n");
2159 	}
2160 
2161 	ath10k_pci_sleep(ar);
2162 	ath10k_pci_enable_legacy_irq(ar);
2163 }
2164 
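/* Main interrupt bottom half: dispatches firmware indications, services
 * all copy engines and re-enables the legacy interrupt that the hard irq
 * handler masked. */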
2165 static void ath10k_pci_tasklet(unsigned long data)
2166 {
2167 	struct ath10k *ar = (struct ath10k *)data;
2168 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2169 
2170 	ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2171 	ath10k_ce_per_engine_service_any(ar);
2172 
2173 	/* Re-enable legacy irq that was disabled in the irq handler */
2174 	if (ar_pci->num_msi_intrs == 0)
2175 		ath10k_pci_enable_legacy_irq(ar);
2176 }
2177 
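/* MSI-X layout: vector MSI_ASSIGN_FW carries firmware indications while
 * vectors MSI_ASSIGN_CE_INITIAL..MSI_ASSIGN_CE_MAX map one-to-one onto
 * copy engines. Any vectors already requested are unwound on failure. */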
2178 static int ath10k_pci_request_irq_msix(struct ath10k *ar)
2179 {
2180 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2181 	int ret, i;
2182 
2183 	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2184 			  ath10k_pci_msi_fw_handler,
2185 			  IRQF_SHARED, "ath10k_pci", ar);
2186 	if (ret) {
2187 		ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
2188 			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2189 		return ret;
2190 	}
2191 
2192 	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2193 		ret = request_irq(ar_pci->pdev->irq + i,
2194 				  ath10k_pci_per_engine_handler,
2195 				  IRQF_SHARED, "ath10k_pci", ar);
2196 		if (ret) {
2197 			ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
2198 				    ar_pci->pdev->irq + i, ret);
2199 
2200 			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2201 				free_irq(ar_pci->pdev->irq + i, ar);
2202 
2203 			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2204 			return ret;
2205 		}
2206 	}
2207 
2208 	return 0;
2209 }
2210 
2211 static int ath10k_pci_request_irq_msi(struct ath10k *ar)
2212 {
2213 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2214 	int ret;
2215 
2216 	ret = request_irq(ar_pci->pdev->irq,
2217 			  ath10k_pci_interrupt_handler,
2218 			  IRQF_SHARED, "ath10k_pci", ar);
2219 	if (ret) {
2220 		ath10k_warn("failed to request MSI irq %d: %d\n",
2221 			    ar_pci->pdev->irq, ret);
2222 		return ret;
2223 	}
2224 
2225 	return 0;
2226 }
2227 
2228 static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
2229 {
2230 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2231 	int ret;
2232 
2233 	ret = request_irq(ar_pci->pdev->irq,
2234 			  ath10k_pci_interrupt_handler,
2235 			  IRQF_SHARED, "ath10k_pci", ar);
2236 	if (ret) {
2237 		ath10k_warn("failed to request legacy irq %d: %d\n",
2238 			    ar_pci->pdev->irq, ret);
2239 		return ret;
2240 	}
2241 
2242 	return 0;
2243 }
2244 
2245 static int ath10k_pci_request_irq(struct ath10k *ar)
2246 {
2247 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2248 
2249 	switch (ar_pci->num_msi_intrs) {
2250 	case 0:
2251 		return ath10k_pci_request_irq_legacy(ar);
2252 	case 1:
2253 		return ath10k_pci_request_irq_msi(ar);
2254 	case MSI_NUM_REQUEST:
2255 		return ath10k_pci_request_irq_msix(ar);
2256 	}
2257 
2258 	ath10k_warn("unknown irq configuration upon request\n");
2259 	return -EINVAL;
2260 }
2261 
2262 static void ath10k_pci_free_irq(struct ath10k *ar)
2263 {
2264 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2265 	int i;
2266 
2267 	/* There's at least one interrupt regardless of whether it's a legacy
2268 	 * INTR, MSI or MSI-X interrupt. */
2269 	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2270 		free_irq(ar_pci->pdev->irq + i, ar);
2271 }
2272 
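/* Set up all bottom halves: the main interrupt tasklet, the firmware
 * error tasklet, the early boot tasklet and one tasklet per copy engine. */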
2273 static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2274 {
2275 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2276 	int i;
2277 
2278 	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2279 	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2280 		     (unsigned long)ar);
2281 	tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
2282 		     (unsigned long)ar);
2283 
2284 	for (i = 0; i < CE_COUNT; i++) {
2285 		ar_pci->pipe_info[i].ar_pci = ar_pci;
2286 		tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2287 			     (unsigned long)&ar_pci->pipe_info[i]);
2288 	}
2289 }
2290 
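/* Pick an interrupt mode: try MSI-X first (auto mode only), then MSI, and
 * finally fall back to the shared legacy interrupt, honouring any
 * restriction from the irq_mode module parameter. */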
2291 static int ath10k_pci_init_irq(struct ath10k *ar)
2292 {
2293 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2294 	bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
2295 				       ar_pci->features);
2296 	int ret;
2297 
2298 	ath10k_pci_init_irq_tasklets(ar);
2299 
2300 	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
2301 	    !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
2302 		ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);
2303 
2304 	/* Try MSI-X */
2305 	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
2306 		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
2307 		ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
2308 					   ar_pci->num_msi_intrs);
2309 		if (ret > 0)
2310 			return 0;
2311 
2312 		/* fall-through */
2313 	}
2314 
2315 	/* Try MSI */
2316 	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2317 		ar_pci->num_msi_intrs = 1;
2318 		ret = pci_enable_msi(ar_pci->pdev);
2319 		if (ret == 0)
2320 			return 0;
2321 
2322 		/* fall-through */
2323 	}
2324 
2325 	/* Try legacy irq
2326 	 *
2327 	 * A potential race occurs here: the CORE_BASE write below
2328 	 * depends on the target correctly decoding the AXI address,
2329 	 * but the host has no way of knowing when the target has
2330 	 * written its BAR to CORE_CTRL, so the write may be lost if
2331 	 * the BAR has not been written yet. For now, fix the race by
2332 	 * repeating the write in the synchronization loop in
2333 	 * ath10k_pci_wait_for_target_init(). */
2333 	ar_pci->num_msi_intrs = 0;
2334 
2335 	ret = ath10k_pci_wake(ar);
2336 	if (ret) {
2337 		ath10k_warn("failed to wake target: %d\n", ret);
2338 		return ret;
2339 	}
2340 
2341 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2342 			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2343 	ath10k_pci_sleep(ar);
2344 
2345 	return 0;
2346 }
2347 
2348 static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2349 {
2350 	int ret;
2351 
2352 	ret = ath10k_pci_wake(ar);
2353 	if (ret) {
2354 		ath10k_warn("failed to wake target: %d\n", ret);
2355 		return ret;
2356 	}
2357 
2358 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2359 			   0);
2360 	ath10k_pci_sleep(ar);
2361 
2362 	return 0;
2363 }
2364 
2365 static int ath10k_pci_deinit_irq(struct ath10k *ar)
2366 {
2367 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2368 
2369 	switch (ar_pci->num_msi_intrs) {
2370 	case 0:
2371 		return ath10k_pci_deinit_irq_legacy(ar);
2372 	case 1:
2373 		/* fall-through */
2374 	case MSI_NUM_REQUEST:
2375 		pci_disable_msi(ar_pci->pdev);
2376 		return 0;
2377 	default:
2378 		pci_disable_msi(ar_pci->pdev);
2379 	}
2380 
2381 	ath10k_warn("unknown irq configuration upon deinit\n");
2382 	return -EINVAL;
2383 }
2384 
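/* Poll the firmware indicator register for FW_IND_INITIALIZED for up to
 * ~3 seconds in 10 ms steps. In legacy interrupt mode the interrupt
 * enable write is repeated each iteration to work around the BAR decoding
 * race described in ath10k_pci_init_irq(). */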
2385 static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2386 {
2387 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2388 	int wait_limit = 300; /* 3 sec */
2389 	int ret;
2390 
2391 	ret = ath10k_pci_wake(ar);
2392 	if (ret) {
2393 		ath10k_err("failed to wake up target: %d\n", ret);
2394 		return ret;
2395 	}
2396 
2397 	while (wait_limit-- &&
2398 	       !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2399 		 FW_IND_INITIALIZED)) {
2400 		if (ar_pci->num_msi_intrs == 0)
2401 			/* Fix potential race by repeating CORE_BASE writes */
2402 			iowrite32(PCIE_INTR_FIRMWARE_MASK |
2403 				  PCIE_INTR_CE_MASK_ALL,
2404 				  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2405 						 PCIE_INTR_ENABLE_ADDRESS));
2406 		mdelay(10);
2407 	}
2408 
2409 	if (wait_limit < 0) {
2410 		ath10k_err("target stalled\n");
2411 		ret = -EIO;
2413 	}
2414 
2416 	ath10k_pci_sleep(ar);
2417 	return ret;
2418 }
2419 
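/* Full SoC cold reset: assert bit 0 of SOC_GLOBAL_RESET (resetting the
 * target including its PCIe core), wait for RTC_STATE to report the cold
 * reset, then deassert and wait for the indication to clear again. */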
2420 static int ath10k_pci_cold_reset(struct ath10k *ar)
2421 {
2422 	int i, ret;
2423 	u32 val;
2424 
2425 	ret = ath10k_do_pci_wake(ar);
2426 	if (ret) {
2427 		ath10k_err("failed to wake up target: %d\n", ret);
2429 		return ret;
2430 	}
2431 
2432 	/* Put Target, including PCIe, into RESET. */
2433 	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2434 	val |= 1;
2435 	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2436 
2437 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2438 		if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2439 					  RTC_STATE_COLD_RESET_MASK)
2440 			break;
2441 		msleep(1);
2442 	}
2443 
2444 	/* Pull Target, including PCIe, out of RESET. */
2445 	val &= ~1;
2446 	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2447 
2448 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2449 		if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2450 					    RTC_STATE_COLD_RESET_MASK))
2451 			break;
2452 		msleep(1);
2453 	}
2454 
2455 	ath10k_do_pci_sleep(ar);
2456 	return 0;
2457 }
2458 
2459 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2460 {
2461 	int i;
2462 
2463 	for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2464 		if (!test_bit(i, ar_pci->features))
2465 			continue;
2466 
2467 		switch (i) {
2468 		case ATH10K_PCI_FEATURE_MSI_X:
2469 			ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
2470 			break;
2471 		case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
2472 			ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
2473 			break;
2474 		}
2475 	}
2476 }
2477 
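/* Probe: allocate the HIF PCI state, create the core instance, claim the
 * PCI resources (BAR iomap, 32-bit DMA masks, bus mastering), read the
 * chip id and register with the core. */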
2478 static int ath10k_pci_probe(struct pci_dev *pdev,
2479 			    const struct pci_device_id *pci_dev)
2480 {
2481 	void __iomem *mem;
2482 	int ret = 0;
2483 	struct ath10k *ar;
2484 	struct ath10k_pci *ar_pci;
2485 	u32 lcr_val, chip_id;
2486 
2487 	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2488 
2489 	ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2490 	if (ar_pci == NULL)
2491 		return -ENOMEM;
2492 
2493 	ar_pci->pdev = pdev;
2494 	ar_pci->dev = &pdev->dev;
2495 
2496 	switch (pci_dev->device) {
2497 	case QCA988X_2_0_DEVICE_ID:
2498 		set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2499 		break;
2500 	default:
2501 		ret = -ENODEV;
2502 		ath10k_err("unknown device ID: %d\n", pci_dev->device);
2503 		goto err_ar_pci;
2504 	}
2505 
2506 	if (ath10k_target_ps)
2507 		set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2508 
2509 	ath10k_pci_dump_features(ar_pci);
2510 
2511 	ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
2512 	if (!ar) {
2513 		ath10k_err("failed to create driver core\n");
2514 		ret = -EINVAL;
2515 		goto err_ar_pci;
2516 	}
2517 
2518 	ar_pci->ar = ar;
2519 	ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2520 	atomic_set(&ar_pci->keep_awake_count, 0);
2521 
2522 	pci_set_drvdata(pdev, ar);
2523 
2524 	/*
2525 	 * Without any knowledge of the Host, the Target may have been reset or
2526 	 * power cycled and its Config Space may no longer reflect the PCI
2527 	 * address space that was assigned earlier by the PCI infrastructure.
2528 	 * Refresh it now.
2529 	 */
2530 	ret = pci_assign_resource(pdev, BAR_NUM);
2531 	if (ret) {
2532 		ath10k_err("failed to assign PCI space: %d\n", ret);
2533 		goto err_ar;
2534 	}
2535 
2536 	ret = pci_enable_device(pdev);
2537 	if (ret) {
2538 		ath10k_err("failed to enable PCI device: %d\n", ret);
2539 		goto err_ar;
2540 	}
2541 
2542 	/* Request MMIO resources */
2543 	ret = pci_request_region(pdev, BAR_NUM, "ath");
2544 	if (ret) {
2545 		ath10k_err("failed to request MMIO region: %d\n", ret);
2546 		goto err_device;
2547 	}
2548 
2549 	/*
2550 	 * Target structures have a limit of 32-bit DMA pointers.
2551 	 * DMA pointers can be wider than 32 bits by default on some systems.
2552 	 */
2553 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2554 	if (ret) {
2555 		ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
2556 		goto err_region;
2557 	}
2558 
2559 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2560 	if (ret) {
2561 		ath10k_err("failed to set consistent DMA mask to 32-bit: %d\n", ret);
2562 		goto err_region;
2563 	}
2564 
2565 	/* Set bus master bit in PCI_COMMAND to enable DMA */
2566 	pci_set_master(pdev);
2567 
2568 	/*
2569 	 * Temporary fix: disable ASPM by clearing the low byte of the PCIe
2570 	 * link control register. To be removed once the OTP is programmed.
2571 	 */
2572 	pci_read_config_dword(pdev, 0x80, &lcr_val);
2573 	pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2574 
2575 	/* Arrange for access to Target SoC registers. */
2576 	mem = pci_iomap(pdev, BAR_NUM, 0);
2577 	if (!mem) {
2578 		ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
2579 		ret = -EIO;
2580 		goto err_master;
2581 	}
2582 
2583 	ar_pci->mem = mem;
2584 
2585 	spin_lock_init(&ar_pci->ce_lock);
2586 
2587 	ret = ath10k_do_pci_wake(ar);
2588 	if (ret) {
2589 		ath10k_err("failed to wake target to read chip id: %d\n", ret);
2590 		goto err_iomap;
2591 	}
2592 
2593 	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
2594 
2595 	ath10k_do_pci_sleep(ar);
2596 
2597 	ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2598 
2599 	ret = ath10k_core_register(ar, chip_id);
2600 	if (ret) {
2601 		ath10k_err("failed to register driver core: %d\n", ret);
2602 		goto err_iomap;
2603 	}
2604 
2605 	return 0;
2606 
2607 err_iomap:
2608 	pci_iounmap(pdev, mem);
2609 err_master:
2610 	pci_clear_master(pdev);
2611 err_region:
2612 	pci_release_region(pdev, BAR_NUM);
2613 err_device:
2614 	pci_disable_device(pdev);
2615 err_ar:
2616 	ath10k_core_destroy(ar);
2617 err_ar_pci:
2618 	/* nothing else to unwind; just free the PCI state */
2619 	kfree(ar_pci);
2620 
2621 	return ret;
2622 }
2623 
2624 static void ath10k_pci_remove(struct pci_dev *pdev)
2625 {
2626 	struct ath10k *ar = pci_get_drvdata(pdev);
2627 	struct ath10k_pci *ar_pci;
2628 
2629 	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2630 
2631 	if (!ar)
2632 		return;
2633 
2634 	ar_pci = ath10k_pci_priv(ar);
2635 
2636 	if (!ar_pci)
2637 		return;
2638 
2639 	tasklet_kill(&ar_pci->msi_fw_err);
2640 
2641 	ath10k_core_unregister(ar);
2642 
2643 	pci_iounmap(pdev, ar_pci->mem);
2644 	pci_release_region(pdev, BAR_NUM);
2645 	pci_clear_master(pdev);
2646 	pci_disable_device(pdev);
2647 
2648 	ath10k_core_destroy(ar);
2649 	kfree(ar_pci);
2650 }
2651 
2652 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2653 
2654 static struct pci_driver ath10k_pci_driver = {
2655 	.name = "ath10k_pci",
2656 	.id_table = ath10k_pci_id_table,
2657 	.probe = ath10k_pci_probe,
2658 	.remove = ath10k_pci_remove,
2659 };
2660 
2661 static int __init ath10k_pci_init(void)
2662 {
2663 	int ret;
2664 
2665 	ret = pci_register_driver(&ath10k_pci_driver);
2666 	if (ret)
2667 		ath10k_err("failed to register PCI driver: %d\n", ret);
2668 
2669 	return ret;
2670 }
2671 module_init(ath10k_pci_init);
2672 
2673 static void __exit ath10k_pci_exit(void)
2674 {
2675 	pci_unregister_driver(&ath10k_pci_driver);
2676 }
2677 
2678 module_exit(ath10k_pci_exit);
2679 
2680 MODULE_AUTHOR("Qualcomm Atheros");
2681 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2682 MODULE_LICENSE("Dual BSD/GPL");
2683 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2684 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2685 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
2686