xref: /openbmc/linux/drivers/net/wireless/ath/ath10k/pci.c (revision ee8a99bd)
1 /*
2  * Copyright (c) 2005-2011 Atheros Communications Inc.
3  * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <linux/pci.h>
19 #include <linux/module.h>
20 #include <linux/interrupt.h>
21 #include <linux/spinlock.h>
22 
23 #include "core.h"
24 #include "debug.h"
25 
26 #include "targaddrs.h"
27 #include "bmi.h"
28 
29 #include "hif.h"
30 #include "htc.h"
31 
32 #include "ce.h"
33 #include "pci.h"
34 
35 unsigned int ath10k_target_ps;
36 module_param(ath10k_target_ps, uint, 0644);
37 MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
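/*
 * Illustrative note (not from the original source): assuming the usual
 * ath10k_pci module split, this parameter can be set at load time, e.g.
 * "modprobe ath10k_pci ath10k_target_ps=1", or toggled at runtime via
 * /sys/module/ath10k_pci/parameters/ath10k_target_ps (mode 0644 above).
 */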
38 
39 #define QCA988X_1_0_DEVICE_ID	(0xabcd)
40 #define QCA988X_2_0_DEVICE_ID	(0x003c)
41 
42 static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
43 	{ PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */
44 	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
45 	{0}
46 };
47 
48 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
49 				       u32 *data);
50 
51 static void ath10k_pci_process_ce(struct ath10k *ar);
52 static int ath10k_pci_post_rx(struct ath10k *ar);
53 static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
54 					     int num);
55 static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
56 static void ath10k_pci_stop_ce(struct ath10k *ar);
57 
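/*
 * Host-side Copy Engine configuration. Each entry is a positional
 * struct ce_attr initializer (see ce.h for the exact field order); the
 * fields this file relies on are the source ring depth, the maximum
 * source buffer size and the destination ring depth (attr->src_nentries,
 * attr->src_sz_max and attr->dest_nentries).
 */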
58 static const struct ce_attr host_ce_config_wlan[] = {
59 	/* host->target HTC control and raw streams */
60 	{ /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
61 	/* could be moved to share CE3 */
62 	/* target->host HTT + HTC control */
63 	{ /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,},
64 	/* target->host WMI */
65 	{ /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
66 	/* host->target WMI */
67 	{ /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
68 	/* host->target HTT */
69 	{ /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0,
70 		    CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
71 	/* unused */
72 	{ /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
73 	/* Target autonomous hif_memcpy */
74 	{ /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
75 	/* ce_diag, the Diagnostic Window */
76 	{ /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
77 };
78 
79 /* Target firmware's Copy Engine configuration. */
80 static const struct ce_pipe_config target_ce_config_wlan[] = {
81 	/* host->target HTC control and raw streams */
82 	{ /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
83 	/* target->host HTT + HTC control */
84 	{ /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
85 	/* target->host WMI */
86 	{ /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
87 	/* host->target WMI */
88 	{ /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
89 	/* host->target HTT */
90 	{ /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
91 	/* NB: 50% of src nentries, since tx has 2 frags */
92 	/* unused */
93 	{ /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
94 	/* Reserved for target autonomous hif_memcpy */
95 	{ /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
96 	/* CE7 used only by Host */
97 };
98 
99 /*
100  * Diagnostic read/write access is provided for startup/config/debug usage.
101  * Caller must guarantee proper alignment, when applicable, and single user
102  * at any moment.
103  */
104 static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
105 				    int nbytes)
106 {
107 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
108 	int ret = 0;
109 	u32 buf;
110 	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
111 	unsigned int id;
112 	unsigned int flags;
113 	struct ce_state *ce_diag;
114 	/* Host buffer address in CE space */
115 	u32 ce_data;
116 	dma_addr_t ce_data_base = 0;
117 	void *data_buf = NULL;
118 	int i;
119 
120 	/*
121 	 * This code cannot handle reads to non-memory space. Redirect to the
122 	 * register read fn but preserve the multi word read capability of
123 	 * register read fn but preserve the multi-word read capability of
124 	 */
125 	if (address < DRAM_BASE_ADDRESS) {
126 		if (!IS_ALIGNED(address, 4) ||
127 		    !IS_ALIGNED((unsigned long)data, 4))
128 			return -EIO;
129 
130 		while ((nbytes >= 4) &&  ((ret = ath10k_pci_diag_read_access(
131 					   ar, address, (u32 *)data)) == 0)) {
132 			nbytes -= sizeof(u32);
133 			address += sizeof(u32);
134 			data += sizeof(u32);
135 		}
136 		return ret;
137 	}
138 
139 	ce_diag = ar_pci->ce_diag;
140 
141 	/*
142 	 * Allocate a temporary bounce buffer to hold caller's data
143 	 * to be DMA'ed from Target. This guarantees
144 	 *   1) 4-byte alignment
145 	 *   2) Buffer in DMA-able space
146 	 */
147 	orig_nbytes = nbytes;
148 	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
149 							 orig_nbytes,
150 							 &ce_data_base);
151 
152 	if (!data_buf) {
153 		ret = -ENOMEM;
154 		goto done;
155 	}
156 	memset(data_buf, 0, orig_nbytes);
157 
158 	remaining_bytes = orig_nbytes;
159 	ce_data = ce_data_base;
160 	while (remaining_bytes) {
161 		nbytes = min_t(unsigned int, remaining_bytes,
162 			       DIAG_TRANSFER_LIMIT);
163 
164 		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
165 		if (ret != 0)
166 			goto done;
167 
168 		/* Request CE to send from Target(!) address to Host buffer */
169 		/*
170 		 * The address supplied by the caller is in the
171 		 * Target CPU virtual address space.
172 		 *
173 		 * In order to use this address with the diagnostic CE,
174 		 * convert it from Target CPU virtual address space
175 		 * to CE address space
176 		 */
177 		ath10k_pci_wake(ar);
178 		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
179 						     address);
180 		ath10k_pci_sleep(ar);
181 
182 		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
183 				 0);
184 		if (ret)
185 			goto done;
186 
187 		i = 0;
188 		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
189 						     &completed_nbytes,
190 						     &id) != 0) {
191 			mdelay(1);
192 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
193 				ret = -EBUSY;
194 				goto done;
195 			}
196 		}
197 
198 		if (nbytes != completed_nbytes) {
199 			ret = -EIO;
200 			goto done;
201 		}
202 
203 		if (buf != (u32) address) {
204 			ret = -EIO;
205 			goto done;
206 		}
207 
208 		i = 0;
209 		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
210 						     &completed_nbytes,
211 						     &id, &flags) != 0) {
212 			mdelay(1);
213 
214 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
215 				ret = -EBUSY;
216 				goto done;
217 			}
218 		}
219 
220 		if (nbytes != completed_nbytes) {
221 			ret = -EIO;
222 			goto done;
223 		}
224 
225 		if (buf != ce_data) {
226 			ret = -EIO;
227 			goto done;
228 		}
229 
230 		remaining_bytes -= nbytes;
231 		address += nbytes;
232 		ce_data += nbytes;
233 	}
234 
235 done:
236 	if (ret == 0) {
237 		/* Copy data from allocated DMA buf to caller's buf */
238 		WARN_ON_ONCE(orig_nbytes & 3);
239 		for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
240 			((u32 *)data)[i] =
241 				__le32_to_cpu(((__le32 *)data_buf)[i]);
242 		}
243 	} else
244 		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
245 			   __func__, address);
246 
247 	if (data_buf)
248 		pci_free_consistent(ar_pci->pdev, orig_nbytes,
249 				    data_buf, ce_data_base);
250 
251 	return ret;
252 }
253 
254 /* Read 4-byte aligned data from Target memory or register */
255 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
256 				       u32 *data)
257 {
258 	/* Assume range doesn't cross this boundary */
259 	if (address >= DRAM_BASE_ADDRESS)
260 		return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));
261 
262 	ath10k_pci_wake(ar);
263 	*data = ath10k_pci_read32(ar, address);
264 	ath10k_pci_sleep(ar);
265 	return 0;
266 }
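/*
 * Usage sketch (illustrative only, not part of the driver): a single
 * host_interest word can be read through the diagnostic window like
 * this, similar to what ath10k_pci_hif_dump_area() does below with
 * ath10k_pci_diag_read_mem():
 *
 *	u32 addr = host_interest_item_address(HI_ITEM(hi_failure_state));
 *	u32 val;
 *
 *	if (ath10k_pci_diag_read_access(ar, addr, &val))
 *		... handle error ...
 */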
267 
268 static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
269 				     const void *data, int nbytes)
270 {
271 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
272 	int ret = 0;
273 	u32 buf;
274 	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
275 	unsigned int id;
276 	unsigned int flags;
277 	struct ce_state *ce_diag;
278 	void *data_buf = NULL;
279 	u32 ce_data;	/* Host buffer address in CE space */
280 	dma_addr_t ce_data_base = 0;
281 	int i;
282 
283 	ce_diag = ar_pci->ce_diag;
284 
285 	/*
286 	 * Allocate a temporary bounce buffer to hold caller's data
287 	 * to be DMA'ed to Target. This guarantees
288 	 *   1) 4-byte alignment
289 	 *   2) Buffer in DMA-able space
290 	 */
291 	orig_nbytes = nbytes;
292 	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
293 							 orig_nbytes,
294 							 &ce_data_base);
295 	if (!data_buf) {
296 		ret = -ENOMEM;
297 		goto done;
298 	}
299 
300 	/* Copy caller's data to allocated DMA buf */
301 	WARN_ON_ONCE(orig_nbytes & 3);
302 	for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
303 		((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
304 
305 	/*
306 	 * The address supplied by the caller is in the
307 	 * Target CPU virtual address space.
308 	 *
309 	 * In order to use this address with the diagnostic CE,
310 	 * convert it from
311 	 *    Target CPU virtual address space
312 	 * to
313 	 *    CE address space
314 	 */
315 	ath10k_pci_wake(ar);
316 	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
317 	ath10k_pci_sleep(ar);
318 
319 	remaining_bytes = orig_nbytes;
320 	ce_data = ce_data_base;
321 	while (remaining_bytes) {
322 		/* FIXME: check cast */
323 		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
324 
325 		/* Set up to receive directly into Target(!) address */
326 		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
327 		if (ret != 0)
328 			goto done;
329 
330 		/*
331 		 * Request CE to send caller-supplied data that
332 		 * was copied to bounce buffer to Target(!) address.
333 		 */
334 		ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
335 				     nbytes, 0, 0);
336 		if (ret != 0)
337 			goto done;
338 
339 		i = 0;
340 		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
341 						     &completed_nbytes,
342 						     &id) != 0) {
343 			mdelay(1);
344 
345 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
346 				ret = -EBUSY;
347 				goto done;
348 			}
349 		}
350 
351 		if (nbytes != completed_nbytes) {
352 			ret = -EIO;
353 			goto done;
354 		}
355 
356 		if (buf != ce_data) {
357 			ret = -EIO;
358 			goto done;
359 		}
360 
361 		i = 0;
362 		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
363 						     &completed_nbytes,
364 						     &id, &flags) != 0) {
365 			mdelay(1);
366 
367 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
368 				ret = -EBUSY;
369 				goto done;
370 			}
371 		}
372 
373 		if (nbytes != completed_nbytes) {
374 			ret = -EIO;
375 			goto done;
376 		}
377 
378 		if (buf != address) {
379 			ret = -EIO;
380 			goto done;
381 		}
382 
383 		remaining_bytes -= nbytes;
384 		address += nbytes;
385 		ce_data += nbytes;
386 	}
387 
388 done:
389 	if (data_buf) {
390 		pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
391 				    ce_data_base);
392 	}
393 
394 	if (ret != 0)
395 		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
396 			   address);
397 
398 	return ret;
399 }
400 
401 /* Write 4B data to Target memory or register */
402 static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
403 					u32 data)
404 {
405 	/* Assume range doesn't cross this boundary */
406 	if (address >= DRAM_BASE_ADDRESS)
407 		return ath10k_pci_diag_write_mem(ar, address, &data,
408 						 sizeof(u32));
409 
410 	ath10k_pci_wake(ar);
411 	ath10k_pci_write32(ar, address, data);
412 	ath10k_pci_sleep(ar);
413 	return 0;
414 }
415 
416 static bool ath10k_pci_target_is_awake(struct ath10k *ar)
417 {
418 	void __iomem *mem = ath10k_pci_priv(ar)->mem;
419 	u32 val;
420 	val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
421 		       RTC_STATE_ADDRESS);
422 	return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
423 }
424 
425 static void ath10k_pci_wait(struct ath10k *ar)
426 {
427 	int n = 100;
428 
429 	while (n-- && !ath10k_pci_target_is_awake(ar))
430 		msleep(10);
431 
432 	if (n < 0)
433 		ath10k_warn("Unable to wake up target\n");
434 }
435 
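/*
 * Reference-counted target wakeup: the first caller forces the SoC
 * AWAKE via PCIE_SOC_WAKE and then polls RTC_STATE (with a growing
 * delay, up to PCIE_WAKE_TIMEOUT) until the target reports it is on.
 * ath10k_do_pci_sleep() below drops the reference and lets the target
 * sleep again once the count reaches zero.
 */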
436 void ath10k_do_pci_wake(struct ath10k *ar)
437 {
438 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
439 	void __iomem *pci_addr = ar_pci->mem;
440 	int tot_delay = 0;
441 	int curr_delay = 5;
442 
443 	if (atomic_read(&ar_pci->keep_awake_count) == 0) {
444 		/* Force AWAKE */
445 		iowrite32(PCIE_SOC_WAKE_V_MASK,
446 			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
447 			  PCIE_SOC_WAKE_ADDRESS);
448 	}
449 	atomic_inc(&ar_pci->keep_awake_count);
450 
451 	if (ar_pci->verified_awake)
452 		return;
453 
454 	for (;;) {
455 		if (ath10k_pci_target_is_awake(ar)) {
456 			ar_pci->verified_awake = true;
457 			break;
458 		}
459 
460 		if (tot_delay > PCIE_WAKE_TIMEOUT) {
461 			ath10k_warn("target takes too long to wake up (awake count %d)\n",
462 				    atomic_read(&ar_pci->keep_awake_count));
463 			break;
464 		}
465 
466 		udelay(curr_delay);
467 		tot_delay += curr_delay;
468 
469 		if (curr_delay < 50)
470 			curr_delay += 5;
471 	}
472 }
473 
474 void ath10k_do_pci_sleep(struct ath10k *ar)
475 {
476 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
477 	void __iomem *pci_addr = ar_pci->mem;
478 
479 	if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
480 		/* Allow sleep */
481 		ar_pci->verified_awake = false;
482 		iowrite32(PCIE_SOC_WAKE_RESET,
483 			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
484 			  PCIE_SOC_WAKE_ADDRESS);
485 	}
486 }
487 
488 /*
489  * FIXME: Handle OOM properly.
490  */
491 static inline
492 struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info)
493 {
494 	struct ath10k_pci_compl *compl = NULL;
495 
496 	spin_lock_bh(&pipe_info->pipe_lock);
497 	if (list_empty(&pipe_info->compl_free)) {
498 		ath10k_warn("Completion buffers are full\n");
499 		goto exit;
500 	}
501 	compl = list_first_entry(&pipe_info->compl_free,
502 				 struct ath10k_pci_compl, list);
503 	list_del(&compl->list);
504 exit:
505 	spin_unlock_bh(&pipe_info->pipe_lock);
506 	return compl;
507 }
508 
509 /* Called by lower (CE) layer when a send to Target completes. */
510 static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
511 				    void *transfer_context,
512 				    u32 ce_data,
513 				    unsigned int nbytes,
514 				    unsigned int transfer_id)
515 {
516 	struct ath10k *ar = ce_state->ar;
517 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
518 	struct hif_ce_pipe_info *pipe_info =  &ar_pci->pipe_info[ce_state->id];
519 	struct ath10k_pci_compl *compl;
520 	bool process = false;
521 
522 	do {
523 		/*
524 		 * For the send completion of an item in sendlist, just
525 		 * increment num_sends_allowed. The upper layer callback will
526 		 * be triggered when last fragment is done with send.
527 		 */
528 		if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
529 			spin_lock_bh(&pipe_info->pipe_lock);
530 			pipe_info->num_sends_allowed++;
531 			spin_unlock_bh(&pipe_info->pipe_lock);
532 			continue;
533 		}
534 
535 		compl = get_free_compl(pipe_info);
536 		if (!compl)
537 			break;
538 
539 		compl->send_or_recv = HIF_CE_COMPLETE_SEND;
540 		compl->ce_state = ce_state;
541 		compl->pipe_info = pipe_info;
542 		compl->transfer_context = transfer_context;
543 		compl->nbytes = nbytes;
544 		compl->transfer_id = transfer_id;
545 		compl->flags = 0;
546 
547 		/*
548 		 * Add the completion to the processing queue.
549 		 */
550 		spin_lock_bh(&ar_pci->compl_lock);
551 		list_add_tail(&compl->list, &ar_pci->compl_process);
552 		spin_unlock_bh(&ar_pci->compl_lock);
553 
554 		process = true;
555 	} while (ath10k_ce_completed_send_next(ce_state,
556 							   &transfer_context,
557 							   &ce_data, &nbytes,
558 							   &transfer_id) == 0);
559 
560 	/*
561 	 * If only some of the items within a sendlist have completed,
562 	 * don't invoke completion processing until the entire sendlist
563 	 * has been sent.
564 	 */
565 	if (!process)
566 		return;
567 
568 	ath10k_pci_process_ce(ar);
569 }
570 
571 /* Called by lower (CE) layer when data is received from the Target. */
572 static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
573 				    void *transfer_context, u32 ce_data,
574 				    unsigned int nbytes,
575 				    unsigned int transfer_id,
576 				    unsigned int flags)
577 {
578 	struct ath10k *ar = ce_state->ar;
579 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
580 	struct hif_ce_pipe_info *pipe_info =  &ar_pci->pipe_info[ce_state->id];
581 	struct ath10k_pci_compl *compl;
582 	struct sk_buff *skb;
583 
584 	do {
585 		compl = get_free_compl(pipe_info);
586 		if (!compl)
587 			break;
588 
589 		compl->send_or_recv = HIF_CE_COMPLETE_RECV;
590 		compl->ce_state = ce_state;
591 		compl->pipe_info = pipe_info;
592 		compl->transfer_context = transfer_context;
593 		compl->nbytes = nbytes;
594 		compl->transfer_id = transfer_id;
595 		compl->flags = flags;
596 
597 		skb = transfer_context;
598 		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
599 				 skb->len + skb_tailroom(skb),
600 				 DMA_FROM_DEVICE);
601 		/*
602 		 * Add the completion to the processing queue.
603 		 */
604 		spin_lock_bh(&ar_pci->compl_lock);
605 		list_add_tail(&compl->list, &ar_pci->compl_process);
606 		spin_unlock_bh(&ar_pci->compl_lock);
607 
608 	} while (ath10k_ce_completed_recv_next(ce_state,
609 							   &transfer_context,
610 							   &ce_data, &nbytes,
611 							   &transfer_id,
612 							   &flags) == 0);
613 
614 	ath10k_pci_process_ce(ar);
615 }
616 
617 /* Send the first nbytes bytes of the buffer */
618 static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
619 				    unsigned int transfer_id,
620 				    unsigned int bytes, struct sk_buff *nbuf)
621 {
622 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
623 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
624 	struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]);
625 	struct ce_state *ce_hdl = pipe_info->ce_hdl;
626 	struct ce_sendlist sendlist;
627 	unsigned int len;
628 	u32 flags = 0;
629 	int ret;
630 
631 	memset(&sendlist, 0, sizeof(struct ce_sendlist));
632 
633 	len = min(bytes, nbuf->len);
634 	bytes -= len;
635 
636 	if (len & 3)
637 		ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);
638 
639 	ath10k_dbg(ATH10K_DBG_PCI,
640 		   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
641 		   nbuf->data, (unsigned long long) skb_cb->paddr,
642 		   nbuf->len, len);
643 	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
644 			"ath10k tx: data: ",
645 			nbuf->data, nbuf->len);
646 
647 	ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);
648 
649 	/* Make sure we have resources to handle this request */
650 	spin_lock_bh(&pipe_info->pipe_lock);
651 	if (!pipe_info->num_sends_allowed) {
652 		ath10k_warn("Pipe: %d is full\n", pipe_id);
653 		spin_unlock_bh(&pipe_info->pipe_lock);
654 		return -ENOSR;
655 	}
656 	pipe_info->num_sends_allowed--;
657 	spin_unlock_bh(&pipe_info->pipe_lock);
658 
659 	ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
660 	if (ret)
661 		ath10k_warn("CE send failed: %p\n", nbuf);
662 
663 	return ret;
664 }
665 
666 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
667 {
668 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
669 	struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]);
670 	int ret;
671 
672 	spin_lock_bh(&pipe_info->pipe_lock);
673 	ret = pipe_info->num_sends_allowed;
674 	spin_unlock_bh(&pipe_info->pipe_lock);
675 
676 	return ret;
677 }
678 
679 static void ath10k_pci_hif_dump_area(struct ath10k *ar)
680 {
681 	u32 reg_dump_area = 0;
682 	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
683 	u32 host_addr;
684 	int ret;
685 	u32 i;
686 
687 	ath10k_err("firmware crashed!\n");
688 	ath10k_err("hardware name %s version 0x%x\n",
689 		   ar->hw_params.name, ar->target_version);
690 	ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
691 		   ar->fw_version_minor, ar->fw_version_release,
692 		   ar->fw_version_build);
693 
694 	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
695 	if (ath10k_pci_diag_read_mem(ar, host_addr,
696 				     &reg_dump_area, sizeof(u32)) != 0) {
697 		ath10k_warn("could not read hi_failure_state\n");
698 		return;
699 	}
700 
701 	ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);
702 
703 	ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
704 				       &reg_dump_values[0],
705 				       REG_DUMP_COUNT_QCA988X * sizeof(u32));
706 	if (ret != 0) {
707 		ath10k_err("could not dump FW Dump Area\n");
708 		return;
709 	}
710 
711 	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
712 
713 	ath10k_err("target Register Dump\n");
714 	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
715 		ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
716 			   i,
717 			   reg_dump_values[i],
718 			   reg_dump_values[i + 1],
719 			   reg_dump_values[i + 2],
720 			   reg_dump_values[i + 3]);
721 }
722 
723 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
724 					       int force)
725 {
726 	if (!force) {
727 		int resources;
728 		/*
729 		 * Decide whether to actually poll for completions, or just
730 		 * wait for a later chance.
731 		 * If there seem to be plenty of resources left, then just wait
732 		 * since checking involves reading a CE register, which is a
733 		 * relatively expensive operation.
734 		 */
735 		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
736 
737 		/*
738 		 * If at least 50% of the total resources are still available,
739 		 * don't bother checking again yet.
740 		 */
741 		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
742 			return;
743 	}
744 	ath10k_ce_per_engine_service(ar, pipe);
745 }
746 
747 static void ath10k_pci_hif_post_init(struct ath10k *ar,
748 				     struct ath10k_hif_cb *callbacks)
749 {
750 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
751 
752 	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
753 
754 	memcpy(&ar_pci->msg_callbacks_current, callbacks,
755 	       sizeof(ar_pci->msg_callbacks_current));
756 }
757 
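/*
 * Register send/recv completion callbacks for every Copy Engine pipe
 * (except the diagnostic CE) and pre-allocate one ath10k_pci_compl
 * entry per source/destination ring slot, so completions can be queued
 * from the CE callbacks without further allocation.
 */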
758 static int ath10k_pci_start_ce(struct ath10k *ar)
759 {
760 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
761 	struct ce_state *ce_diag = ar_pci->ce_diag;
762 	const struct ce_attr *attr;
763 	struct hif_ce_pipe_info *pipe_info;
764 	struct ath10k_pci_compl *compl;
765 	int i, pipe_num, completions, disable_interrupts;
766 
767 	spin_lock_init(&ar_pci->compl_lock);
768 	INIT_LIST_HEAD(&ar_pci->compl_process);
769 
770 	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
771 		pipe_info = &ar_pci->pipe_info[pipe_num];
772 
773 		spin_lock_init(&pipe_info->pipe_lock);
774 		INIT_LIST_HEAD(&pipe_info->compl_free);
775 
776 		/* Handle Diagnostic CE specially */
777 		if (pipe_info->ce_hdl == ce_diag)
778 			continue;
779 
780 		attr = &host_ce_config_wlan[pipe_num];
781 		completions = 0;
782 
783 		if (attr->src_nentries) {
784 			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
785 			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
786 						   ath10k_pci_ce_send_done,
787 						   disable_interrupts);
788 			completions += attr->src_nentries;
789 			pipe_info->num_sends_allowed = attr->src_nentries - 1;
790 		}
791 
792 		if (attr->dest_nentries) {
793 			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
794 						   ath10k_pci_ce_recv_data);
795 			completions += attr->dest_nentries;
796 		}
797 
798 		if (completions == 0)
799 			continue;
800 
801 		for (i = 0; i < completions; i++) {
802 			compl = kmalloc(sizeof(struct ath10k_pci_compl),
803 					GFP_KERNEL);
804 			if (!compl) {
805 				ath10k_warn("No memory for completion state\n");
806 				ath10k_pci_stop_ce(ar);
807 				return -ENOMEM;
808 			}
809 
810 			compl->send_or_recv = HIF_CE_COMPLETE_FREE;
811 			list_add_tail(&compl->list, &pipe_info->compl_free);
812 		}
813 	}
814 
815 	return 0;
816 }
817 
818 static void ath10k_pci_stop_ce(struct ath10k *ar)
819 {
820 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
821 	struct ath10k_pci_compl *compl;
822 	struct sk_buff *skb;
823 	int i;
824 
825 	ath10k_ce_disable_interrupts(ar);
826 
827 	/* Cancel the pending tasklet */
828 	tasklet_kill(&ar_pci->intr_tq);
829 
830 	for (i = 0; i < CE_COUNT; i++)
831 		tasklet_kill(&ar_pci->pipe_info[i].intr);
832 
833 	/* Mark pending completions as aborted, so that upper layers free up
834 	 * their associated resources */
835 	spin_lock_bh(&ar_pci->compl_lock);
836 	list_for_each_entry(compl, &ar_pci->compl_process, list) {
837 		skb = (struct sk_buff *)compl->transfer_context;
838 		ATH10K_SKB_CB(skb)->is_aborted = true;
839 	}
840 	spin_unlock_bh(&ar_pci->compl_lock);
841 }
842 
843 static void ath10k_pci_cleanup_ce(struct ath10k *ar)
844 {
845 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
846 	struct ath10k_pci_compl *compl, *tmp;
847 	struct hif_ce_pipe_info *pipe_info;
848 	struct sk_buff *netbuf;
849 	int pipe_num;
850 
851 	/* Free pending completions. */
852 	spin_lock_bh(&ar_pci->compl_lock);
853 	if (!list_empty(&ar_pci->compl_process))
854 		ath10k_warn("pending completions still present! possible memory leaks.\n");
855 
856 	list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
857 		list_del(&compl->list);
858 		netbuf = (struct sk_buff *)compl->transfer_context;
859 		dev_kfree_skb_any(netbuf);
860 		kfree(compl);
861 	}
862 	spin_unlock_bh(&ar_pci->compl_lock);
863 
864 	/* Free unused completions for each pipe. */
865 	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
866 		pipe_info = &ar_pci->pipe_info[pipe_num];
867 
868 		spin_lock_bh(&pipe_info->pipe_lock);
869 		list_for_each_entry_safe(compl, tmp,
870 					 &pipe_info->compl_free, list) {
871 			list_del(&compl->list);
872 			kfree(compl);
873 		}
874 		spin_unlock_bh(&pipe_info->pipe_lock);
875 	}
876 }
877 
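/*
 * Drain the shared completion queue: tx completions are handed to the
 * upper-layer tx_completion callback; for rx completions a fresh
 * receive buffer is posted first, then the skb is trimmed to the
 * received length and passed to rx_completion. Processing is
 * serialized via the compl_processing flag; a concurrent caller simply
 * returns.
 */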
878 static void ath10k_pci_process_ce(struct ath10k *ar)
879 {
880 	struct ath10k_pci *ar_pci = ar->hif.priv;
881 	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
882 	struct ath10k_pci_compl *compl;
883 	struct sk_buff *skb;
884 	unsigned int nbytes;
885 	int ret, send_done = 0;
886 
887 	/* Upper layers aren't ready to handle tx/rx completions in parallel so
888 	 * we must serialize all completion processing. */
889 
890 	spin_lock_bh(&ar_pci->compl_lock);
891 	if (ar_pci->compl_processing) {
892 		spin_unlock_bh(&ar_pci->compl_lock);
893 		return;
894 	}
895 	ar_pci->compl_processing = true;
896 	spin_unlock_bh(&ar_pci->compl_lock);
897 
898 	for (;;) {
899 		spin_lock_bh(&ar_pci->compl_lock);
900 		if (list_empty(&ar_pci->compl_process)) {
901 			spin_unlock_bh(&ar_pci->compl_lock);
902 			break;
903 		}
904 		compl = list_first_entry(&ar_pci->compl_process,
905 					 struct ath10k_pci_compl, list);
906 		list_del(&compl->list);
907 		spin_unlock_bh(&ar_pci->compl_lock);
908 
909 		if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) {
910 			cb->tx_completion(ar,
911 					  compl->transfer_context,
912 					  compl->transfer_id);
913 			send_done = 1;
914 		} else {
915 			ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
916 			if (ret) {
917 				ath10k_warn("Unable to post recv buffer for pipe: %d\n",
918 					    compl->pipe_info->pipe_num);
919 				break;
920 			}
921 
922 			skb = (struct sk_buff *)compl->transfer_context;
923 			nbytes = compl->nbytes;
924 
925 			ath10k_dbg(ATH10K_DBG_PCI,
926 				   "ath10k_pci_ce_recv_data netbuf=%p  nbytes=%d\n",
927 				   skb, nbytes);
928 			ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
929 					"ath10k rx: ", skb->data, nbytes);
930 
931 			if (skb->len + skb_tailroom(skb) >= nbytes) {
932 				skb_trim(skb, 0);
933 				skb_put(skb, nbytes);
934 				cb->rx_completion(ar, skb,
935 						  compl->pipe_info->pipe_num);
936 			} else {
937 				ath10k_warn("rxed more than expected (nbytes %d, max %d)",
938 					    nbytes,
939 					    skb->len + skb_tailroom(skb));
940 			}
941 		}
942 
943 		compl->send_or_recv = HIF_CE_COMPLETE_FREE;
944 
945 		/*
946 		 * Add completion back to the pipe's free list.
947 		 */
948 		spin_lock_bh(&compl->pipe_info->pipe_lock);
949 		list_add_tail(&compl->list, &compl->pipe_info->compl_free);
950 		compl->pipe_info->num_sends_allowed += send_done;
951 		spin_unlock_bh(&compl->pipe_info->pipe_lock);
952 	}
953 
954 	spin_lock_bh(&ar_pci->compl_lock);
955 	ar_pci->compl_processing = false;
956 	spin_unlock_bh(&ar_pci->compl_lock);
957 }
958 
959 /* TODO - temporary mapping while we have too few CE's */
960 static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
961 					      u16 service_id, u8 *ul_pipe,
962 					      u8 *dl_pipe, int *ul_is_polled,
963 					      int *dl_is_polled)
964 {
965 	int ret = 0;
966 
967 	/* polling for received messages not supported */
968 	*dl_is_polled = 0;
969 
970 	switch (service_id) {
971 	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
972 		/*
973 		 * Host->target HTT gets its own pipe, so it can be polled
974 		 * while other pipes are interrupt driven.
975 		 */
976 		*ul_pipe = 4;
977 		/*
978 		 * Use the same target->host pipe for HTC ctrl, HTC raw
979 		 * streams, and HTT.
980 		 */
981 		*dl_pipe = 1;
982 		break;
983 
984 	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
985 	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
986 		/*
987 		 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
988 		 * HTC_CTRL_RSVD_SVC could share the same pipe as the
989 		 * WMI services.  So, if another CE is needed, change
990 		 * this to *ul_pipe = 3, which frees up CE 0.
991 		 */
992 		/* *ul_pipe = 3; */
993 		*ul_pipe = 0;
994 		*dl_pipe = 1;
995 		break;
996 
997 	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
998 	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
999 	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
1000 	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
1001 
1002 	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
1003 		*ul_pipe = 3;
1004 		*dl_pipe = 2;
1005 		break;
1006 
1007 		/* pipe 5 unused   */
1008 		/* pipe 6 reserved */
1009 		/* pipe 7 reserved */
1010 
1011 	default:
1012 		ret = -1;
1013 		break;
1014 	}
1015 	*ul_is_polled =
1016 		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
1017 
1018 	return ret;
1019 }
1020 
1021 static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1022 						u8 *ul_pipe, u8 *dl_pipe)
1023 {
1024 	int ul_is_polled, dl_is_polled;
1025 
1026 	(void)ath10k_pci_hif_map_service_to_pipe(ar,
1027 						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1028 						 ul_pipe,
1029 						 dl_pipe,
1030 						 &ul_is_polled,
1031 						 &dl_is_polled);
1032 }
1033 
1034 static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
1035 				   int num)
1036 {
1037 	struct ath10k *ar = pipe_info->hif_ce_state;
1038 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1039 	struct ce_state *ce_state = pipe_info->ce_hdl;
1040 	struct sk_buff *skb;
1041 	dma_addr_t ce_data;
1042 	int i, ret = 0;
1043 
1044 	if (pipe_info->buf_sz == 0)
1045 		return 0;
1046 
1047 	for (i = 0; i < num; i++) {
1048 		skb = dev_alloc_skb(pipe_info->buf_sz);
1049 		if (!skb) {
1050 			ath10k_warn("could not allocate skbuff for pipe %d\n",
1051 				    pipe_info->pipe_num);
1052 			ret = -ENOMEM;
1053 			goto err;
1054 		}
1055 
1056 		WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
1057 
1058 		ce_data = dma_map_single(ar->dev, skb->data,
1059 					 skb->len + skb_tailroom(skb),
1060 					 DMA_FROM_DEVICE);
1061 
1062 		if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
1063 			ath10k_warn("could not dma map skbuff\n");
1064 			dev_kfree_skb_any(skb);
1065 			ret = -EIO;
1066 			goto err;
1067 		}
1068 
1069 		ATH10K_SKB_CB(skb)->paddr = ce_data;
1070 
1071 		pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
1072 					       pipe_info->buf_sz,
1073 					       PCI_DMA_FROMDEVICE);
1074 
1075 		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
1076 						 ce_data);
1077 		if (ret) {
1078 			ath10k_warn("could not enqueue to pipe %d (%d)\n",
1079 				    pipe_info->pipe_num, ret);
1080 			goto err;
1081 		}
1082 	}
1083 
1084 	return ret;
1085 
1086 err:
1087 	ath10k_pci_rx_pipe_cleanup(pipe_info);
1088 	return ret;
1089 }
1090 
1091 static int ath10k_pci_post_rx(struct ath10k *ar)
1092 {
1093 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1094 	struct hif_ce_pipe_info *pipe_info;
1095 	const struct ce_attr *attr;
1096 	int pipe_num, ret = 0;
1097 
1098 	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1099 		pipe_info = &ar_pci->pipe_info[pipe_num];
1100 		attr = &host_ce_config_wlan[pipe_num];
1101 
1102 		if (attr->dest_nentries == 0)
1103 			continue;
1104 
1105 		ret = ath10k_pci_post_rx_pipe(pipe_info,
1106 					      attr->dest_nentries - 1);
1107 		if (ret) {
1108 			ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
1109 				    pipe_num);
1110 
1111 			for (; pipe_num >= 0; pipe_num--) {
1112 				pipe_info = &ar_pci->pipe_info[pipe_num];
1113 				ath10k_pci_rx_pipe_cleanup(pipe_info);
1114 			}
1115 			return ret;
1116 		}
1117 	}
1118 
1119 	return 0;
1120 }
1121 
1122 static int ath10k_pci_hif_start(struct ath10k *ar)
1123 {
1124 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1125 	int ret;
1126 
1127 	ret = ath10k_pci_start_ce(ar);
1128 	if (ret) {
1129 		ath10k_warn("could not start CE (%d)\n", ret);
1130 		return ret;
1131 	}
1132 
1133 	/* Post buffers once to start things off. */
1134 	ret = ath10k_pci_post_rx(ar);
1135 	if (ret) {
1136 		ath10k_warn("could not post rx pipes (%d)\n", ret);
1137 		return ret;
1138 	}
1139 
1140 	ar_pci->started = 1;
1141 	return 0;
1142 }
1143 
1144 static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
1145 {
1146 	struct ath10k *ar;
1147 	struct ath10k_pci *ar_pci;
1148 	struct ce_state *ce_hdl;
1149 	u32 buf_sz;
1150 	struct sk_buff *netbuf;
1151 	u32 ce_data;
1152 
1153 	buf_sz = pipe_info->buf_sz;
1154 
1155 	/* Unused Copy Engine */
1156 	if (buf_sz == 0)
1157 		return;
1158 
1159 	ar = pipe_info->hif_ce_state;
1160 	ar_pci = ath10k_pci_priv(ar);
1161 
1162 	if (!ar_pci->started)
1163 		return;
1164 
1165 	ce_hdl = pipe_info->ce_hdl;
1166 
1167 	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
1168 					  &ce_data) == 0) {
1169 		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
1170 				 netbuf->len + skb_tailroom(netbuf),
1171 				 DMA_FROM_DEVICE);
1172 		dev_kfree_skb_any(netbuf);
1173 	}
1174 }
1175 
1176 static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
1177 {
1178 	struct ath10k *ar;
1179 	struct ath10k_pci *ar_pci;
1180 	struct ce_state *ce_hdl;
1181 	struct sk_buff *netbuf;
1182 	u32 ce_data;
1183 	unsigned int nbytes;
1184 	unsigned int id;
1185 	u32 buf_sz;
1186 
1187 	buf_sz = pipe_info->buf_sz;
1188 
1189 	/* Unused Copy Engine */
1190 	if (buf_sz == 0)
1191 		return;
1192 
1193 	ar = pipe_info->hif_ce_state;
1194 	ar_pci = ath10k_pci_priv(ar);
1195 
1196 	if (!ar_pci->started)
1197 		return;
1198 
1199 	ce_hdl = pipe_info->ce_hdl;
1200 
1201 	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
1202 					  &ce_data, &nbytes, &id) == 0) {
1203 		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
1204 			/* Indicate the completion to the higher layer so
1205 			 * it can free the buffer.
1206 			 */
1207 			ATH10K_SKB_CB(netbuf)->is_aborted = true;
1208 			ar_pci->msg_callbacks_current.tx_completion(ar,
1209 								    netbuf,
1210 								    id);
1211 		}
1212 	}
1213 }
1214 
1215 /*
1216  * Cleanup residual buffers for device shutdown:
1217  *    buffers that were enqueued for receive
1218  *    buffers that were to be sent
1219  * Note: Buffers that had completed but which were
1220  * not yet processed are on a completion queue. They
1221  * are handled when the completion thread shuts down.
1222  */
1223 static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1224 {
1225 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1226 	int pipe_num;
1227 
1228 	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1229 		struct hif_ce_pipe_info *pipe_info;
1230 
1231 		pipe_info = &ar_pci->pipe_info[pipe_num];
1232 		ath10k_pci_rx_pipe_cleanup(pipe_info);
1233 		ath10k_pci_tx_pipe_cleanup(pipe_info);
1234 	}
1235 }
1236 
1237 static void ath10k_pci_ce_deinit(struct ath10k *ar)
1238 {
1239 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1240 	struct hif_ce_pipe_info *pipe_info;
1241 	int pipe_num;
1242 
1243 	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1244 		pipe_info = &ar_pci->pipe_info[pipe_num];
1245 		if (pipe_info->ce_hdl) {
1246 			ath10k_ce_deinit(pipe_info->ce_hdl);
1247 			pipe_info->ce_hdl = NULL;
1248 			pipe_info->buf_sz = 0;
1249 		}
1250 	}
1251 }
1252 
1253 static void ath10k_pci_hif_stop(struct ath10k *ar)
1254 {
1255 	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
1256 
1257 	ath10k_pci_stop_ce(ar);
1258 
1259 	/* At this point, asynchronous threads are stopped, the target should
1260 	 * not DMA nor interrupt. We process the leftovers and then free
1261 	 * everything else up. */
1262 
1263 	ath10k_pci_process_ce(ar);
1264 	ath10k_pci_cleanup_ce(ar);
1265 	ath10k_pci_buffer_cleanup(ar);
1266 	ath10k_pci_ce_deinit(ar);
1267 }
1268 
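/*
 * Exchange one BMI request/response pair over the dedicated BMI Copy
 * Engine pipes: the request is duplicated and DMA-mapped for the send
 * CE, an optional response buffer is posted on the receive CE, and the
 * call then waits on xfer.done for up to BMI_COMMUNICATION_TIMEOUT_HZ.
 */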
1269 static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1270 					   void *req, u32 req_len,
1271 					   void *resp, u32 *resp_len)
1272 {
1273 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1274 	struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
1275 	struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
1276 	dma_addr_t req_paddr = 0;
1277 	dma_addr_t resp_paddr = 0;
1278 	struct bmi_xfer xfer = {};
1279 	void *treq, *tresp = NULL;
1280 	int ret = 0;
1281 
1282 	if (resp && !resp_len)
1283 		return -EINVAL;
1284 
1285 	if (resp && resp_len && *resp_len == 0)
1286 		return -EINVAL;
1287 
1288 	treq = kmemdup(req, req_len, GFP_KERNEL);
1289 	if (!treq)
1290 		return -ENOMEM;
1291 
1292 	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1293 	ret = dma_mapping_error(ar->dev, req_paddr);
1294 	if (ret)
1295 		goto err_dma;
1296 
1297 	if (resp && resp_len) {
1298 		tresp = kzalloc(*resp_len, GFP_KERNEL);
1299 		if (!tresp) {
1300 			ret = -ENOMEM;
1301 			goto err_req;
1302 		}
1303 
1304 		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1305 					    DMA_FROM_DEVICE);
1306 		ret = dma_mapping_error(ar->dev, resp_paddr);
1307 		if (ret)
1308 			goto err_req;
1309 
1310 		xfer.wait_for_resp = true;
1311 		xfer.resp_len = 0;
1312 
1313 		ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
1314 	}
1315 
1316 	init_completion(&xfer.done);
1317 
1318 	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1319 	if (ret)
1320 		goto err_resp;
1321 
1322 	ret = wait_for_completion_timeout(&xfer.done,
1323 					  BMI_COMMUNICATION_TIMEOUT_HZ);
1324 	if (ret <= 0) {
1325 		u32 unused_buffer;
1326 		unsigned int unused_nbytes;
1327 		unsigned int unused_id;
1328 
1329 		ret = -ETIMEDOUT;
1330 		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1331 					   &unused_nbytes, &unused_id);
1332 	} else {
1333 		/* non-zero means we did not time out */
1334 		ret = 0;
1335 	}
1336 
1337 err_resp:
1338 	if (resp) {
1339 		u32 unused_buffer;
1340 
1341 		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1342 		dma_unmap_single(ar->dev, resp_paddr,
1343 				 *resp_len, DMA_FROM_DEVICE);
1344 	}
1345 err_req:
1346 	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1347 
1348 	if (ret == 0 && resp_len) {
1349 		*resp_len = min(*resp_len, xfer.resp_len);
1350 		memcpy(resp, tresp, xfer.resp_len);
1351 	}
1352 err_dma:
1353 	kfree(treq);
1354 	kfree(tresp);
1355 
1356 	return ret;
1357 }
1358 
1359 static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
1360 				     void *transfer_context,
1361 				     u32 data,
1362 				     unsigned int nbytes,
1363 				     unsigned int transfer_id)
1364 {
1365 	struct bmi_xfer *xfer = transfer_context;
1366 
1367 	if (xfer->wait_for_resp)
1368 		return;
1369 
1370 	complete(&xfer->done);
1371 }
1372 
1373 static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
1374 				     void *transfer_context,
1375 				     u32 data,
1376 				     unsigned int nbytes,
1377 				     unsigned int transfer_id,
1378 				     unsigned int flags)
1379 {
1380 	struct bmi_xfer *xfer = transfer_context;
1381 
1382 	if (!xfer->wait_for_resp) {
1383 		ath10k_warn("unexpected: BMI data received; ignoring\n");
1384 		return;
1385 	}
1386 
1387 	xfer->resp_len = nbytes;
1388 	complete(&xfer->done);
1389 }
1390 
1391 /*
1392  * Map from service/endpoint to Copy Engine.
1393  * This table is derived from the CE_PCI TABLE, above.
1394  * It is passed to the Target at startup for use by firmware.
1395  */
1396 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1397 	{
1398 		 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1399 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1400 		 3,
1401 	},
1402 	{
1403 		 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1404 		 PIPEDIR_IN,		/* in = DL = target -> host */
1405 		 2,
1406 	},
1407 	{
1408 		 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1409 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1410 		 3,
1411 	},
1412 	{
1413 		 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1414 		 PIPEDIR_IN,		/* in = DL = target -> host */
1415 		 2,
1416 	},
1417 	{
1418 		 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1419 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1420 		 3,
1421 	},
1422 	{
1423 		 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1424 		 PIPEDIR_IN,		/* in = DL = target -> host */
1425 		 2,
1426 	},
1427 	{
1428 		 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1429 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1430 		 3,
1431 	},
1432 	{
1433 		 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1434 		 PIPEDIR_IN,		/* in = DL = target -> host */
1435 		 2,
1436 	},
1437 	{
1438 		 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1439 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1440 		 3,
1441 	},
1442 	{
1443 		 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1444 		 PIPEDIR_IN,		/* in = DL = target -> host */
1445 		 2,
1446 	},
1447 	{
1448 		 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1449 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1450 		 0,		/* could be moved to 3 (share with WMI) */
1451 	},
1452 	{
1453 		 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1454 		 PIPEDIR_IN,		/* in = DL = target -> host */
1455 		 1,
1456 	},
1457 	{
1458 		 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
1459 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1460 		 0,
1461 	},
1462 	{
1463 		 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
1464 		 PIPEDIR_IN,		/* in = DL = target -> host */
1465 		 1,
1466 	},
1467 	{
1468 		 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1469 		 PIPEDIR_OUT,		/* out = UL = host -> target */
1470 		 4,
1471 	},
1472 	{
1473 		 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1474 		 PIPEDIR_IN,		/* in = DL = target -> host */
1475 		 1,
1476 	},
1477 
1478 	/* (Additions here) */
1479 
1480 	{				/* Must be last */
1481 		 0,
1482 		 0,
1483 		 0,
1484 	},
1485 };
1486 
1487 /*
1488  * Send an interrupt to the device to wake up the Target CPU
1489  * so it has an opportunity to notice any changed state.
1490  */
1491 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1492 {
1493 	int ret;
1494 	u32 core_ctrl;
1495 
1496 	ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1497 					      CORE_CTRL_ADDRESS,
1498 					  &core_ctrl);
1499 	if (ret) {
1500 		ath10k_warn("Unable to read core ctrl\n");
1501 		return ret;
1502 	}
1503 
1504 	/* A_INUM_FIRMWARE interrupt to Target CPU */
1505 	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1506 
1507 	ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1508 					       CORE_CTRL_ADDRESS,
1509 					   core_ctrl);
1510 	if (ret)
1511 		ath10k_warn("Unable to set interrupt mask\n");
1512 
1513 	return ret;
1514 }
1515 
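/*
 * Push the host's view of the interconnect into target memory through
 * the diagnostic window: the CE pipe configuration, the service-to-pipe
 * map, the PCIe config flags (L1 disabled), the early-allocation
 * settings and finally HI_OPTION_EARLY_CFG_DONE so the target proceeds
 * with initialization.
 */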
1516 static int ath10k_pci_init_config(struct ath10k *ar)
1517 {
1518 	u32 interconnect_targ_addr;
1519 	u32 pcie_state_targ_addr = 0;
1520 	u32 pipe_cfg_targ_addr = 0;
1521 	u32 svc_to_pipe_map = 0;
1522 	u32 pcie_config_flags = 0;
1523 	u32 ealloc_value;
1524 	u32 ealloc_targ_addr;
1525 	u32 flag2_value;
1526 	u32 flag2_targ_addr;
1527 	int ret = 0;
1528 
1529 	/* Download to Target the CE Config and the service-to-CE map */
1530 	interconnect_targ_addr =
1531 		host_interest_item_address(HI_ITEM(hi_interconnect_state));
1532 
1533 	/* Supply Target-side CE configuration */
1534 	ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1535 					  &pcie_state_targ_addr);
1536 	if (ret != 0) {
1537 		ath10k_err("Failed to get pcie state addr: %d\n", ret);
1538 		return ret;
1539 	}
1540 
1541 	if (pcie_state_targ_addr == 0) {
1542 		ret = -EIO;
1543 		ath10k_err("Invalid pcie state addr\n");
1544 		return ret;
1545 	}
1546 
1547 	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1548 					  offsetof(struct pcie_state,
1549 						   pipe_cfg_addr),
1550 					  &pipe_cfg_targ_addr);
1551 	if (ret != 0) {
1552 		ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1553 		return ret;
1554 	}
1555 
1556 	if (pipe_cfg_targ_addr == 0) {
1557 		ret = -EIO;
1558 		ath10k_err("Invalid pipe cfg addr\n");
1559 		return ret;
1560 	}
1561 
1562 	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1563 				 target_ce_config_wlan,
1564 				 sizeof(target_ce_config_wlan));
1565 
1566 	if (ret != 0) {
1567 		ath10k_err("Failed to write pipe cfg: %d\n", ret);
1568 		return ret;
1569 	}
1570 
1571 	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1572 					  offsetof(struct pcie_state,
1573 						   svc_to_pipe_map),
1574 					  &svc_to_pipe_map);
1575 	if (ret != 0) {
1576 		ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1577 		return ret;
1578 	}
1579 
1580 	if (svc_to_pipe_map == 0) {
1581 		ret = -EIO;
1582 		ath10k_err("Invalid svc_to_pipe map\n");
1583 		return ret;
1584 	}
1585 
1586 	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1587 				 target_service_to_ce_map_wlan,
1588 				 sizeof(target_service_to_ce_map_wlan));
1589 	if (ret != 0) {
1590 		ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1591 		return ret;
1592 	}
1593 
1594 	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1595 					  offsetof(struct pcie_state,
1596 						   config_flags),
1597 					  &pcie_config_flags);
1598 	if (ret != 0) {
1599 		ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1600 		return ret;
1601 	}
1602 
1603 	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1604 
1605 	ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1606 				 offsetof(struct pcie_state, config_flags),
1607 				 &pcie_config_flags,
1608 				 sizeof(pcie_config_flags));
1609 	if (ret != 0) {
1610 		ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1611 		return ret;
1612 	}
1613 
1614 	/* configure early allocation */
1615 	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1616 
1617 	ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1618 	if (ret != 0) {
1619 		ath10k_err("Failed to get early alloc val: %d\n", ret);
1620 		return ret;
1621 	}
1622 
1623 	/* first bank is switched to IRAM */
1624 	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1625 			 HI_EARLY_ALLOC_MAGIC_MASK);
1626 	ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1627 			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1628 
1629 	ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1630 	if (ret != 0) {
1631 		ath10k_err("Failed to set early alloc val: %d\n", ret);
1632 		return ret;
1633 	}
1634 
1635 	/* Tell Target to proceed with initialization */
1636 	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1637 
1638 	ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1639 	if (ret != 0) {
1640 		ath10k_err("Failed to get option val: %d\n", ret);
1641 		return ret;
1642 	}
1643 
1644 	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1645 
1646 	ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1647 	if (ret != 0) {
1648 		ath10k_err("Failed to set option val: %d\n", ret);
1649 		return ret;
1650 	}
1651 
1652 	return 0;
1653 }
1654 
1655 
1656 
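/*
 * Bring up every Copy Engine: the last CE is reserved as the diagnostic
 * window (ar_pci->ce_diag), the others get their buffer size from the
 * host CE configuration. BMI completion handlers are installed here and
 * later replaced by the generic ones in ath10k_pci_start_ce().
 */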
1657 static int ath10k_pci_ce_init(struct ath10k *ar)
1658 {
1659 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1660 	struct hif_ce_pipe_info *pipe_info;
1661 	const struct ce_attr *attr;
1662 	int pipe_num;
1663 
1664 	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1665 		pipe_info = &ar_pci->pipe_info[pipe_num];
1666 		pipe_info->pipe_num = pipe_num;
1667 		pipe_info->hif_ce_state = ar;
1668 		attr = &host_ce_config_wlan[pipe_num];
1669 
1670 		pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1671 		if (pipe_info->ce_hdl == NULL) {
1672 			ath10k_err("Unable to initialize CE for pipe: %d\n",
1673 				   pipe_num);
1674 
1675 			/* It is safe to call it here. It checks if ce_hdl is
1676 			 * valid for each pipe */
1677 			ath10k_pci_ce_deinit(ar);
1678 			return -1;
1679 		}
1680 
1681 		if (pipe_num == ar_pci->ce_count - 1) {
1682 			/*
1683 			 * Reserve the ultimate CE for
1684 			 * diagnostic Window support
1685 			 */
1686 			ar_pci->ce_diag =
1687 			ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;
1688 			continue;
1689 		}
1690 
1691 		pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1692 	}
1693 
1694 	/*
1695 	 * Initially, establish CE completion handlers for use with BMI.
1696 	 * These are overwritten with generic handlers after we exit BMI phase.
1697 	 */
1698 	pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1699 	ath10k_ce_send_cb_register(pipe_info->ce_hdl,
1700 				   ath10k_pci_bmi_send_done, 0);
1701 
1702 	pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1703 	ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
1704 				   ath10k_pci_bmi_recv_data);
1705 
1706 	return 0;
1707 }
1708 
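/*
 * Check the firmware indicator register; if the target has flagged a
 * pending event (typically a firmware crash), acknowledge it and, once
 * the HIF has been started, dump the target's crash registers.
 */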
1709 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1710 {
1711 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1712 	u32 fw_indicator_address, fw_indicator;
1713 
1714 	ath10k_pci_wake(ar);
1715 
1716 	fw_indicator_address = ar_pci->fw_indicator_address;
1717 	fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1718 
1719 	if (fw_indicator & FW_IND_EVENT_PENDING) {
1720 		/* ACK: clear Target-side pending event */
1721 		ath10k_pci_write32(ar, fw_indicator_address,
1722 				   fw_indicator & ~FW_IND_EVENT_PENDING);
1723 
1724 		if (ar_pci->started) {
1725 			ath10k_pci_hif_dump_area(ar);
1726 		} else {
1727 			/*
1728 			 * Probable Target failure before we're prepared
1729 			 * to handle it.  Generally unexpected.
1730 			 */
1731 			ath10k_warn("early firmware event indicated\n");
1732 		}
1733 	}
1734 
1735 	ath10k_pci_sleep(ar);
1736 }
1737 
1738 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
1739 	.send_head		= ath10k_pci_hif_send_head,
1740 	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
1741 	.start			= ath10k_pci_hif_start,
1742 	.stop			= ath10k_pci_hif_stop,
1743 	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
1744 	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
1745 	.send_complete_check	= ath10k_pci_hif_send_complete_check,
1746 	.init			= ath10k_pci_hif_post_init,
1747 	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
1748 };
1749 
1750 static void ath10k_pci_ce_tasklet(unsigned long ptr)
1751 {
1752 	struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr;
1753 	struct ath10k_pci *ar_pci = pipe->ar_pci;
1754 
1755 	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
1756 }
1757 
1758 static void ath10k_msi_err_tasklet(unsigned long data)
1759 {
1760 	struct ath10k *ar = (struct ath10k *)data;
1761 
1762 	ath10k_pci_fw_interrupt_handler(ar);
1763 }
1764 
1765 /*
1766  * Handler for a per-engine interrupt on a PARTICULAR CE.
1767  * This is used in cases where each CE has a private MSI interrupt.
1768  */
1769 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
1770 {
1771 	struct ath10k *ar = arg;
1772 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1773 	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
1774 
1775 	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
1776 		ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
1777 		return IRQ_HANDLED;
1778 	}
1779 
1780 	/*
1781 	 * NOTE: We are able to derive ce_id from irq because we
1782 	 * use a one-to-one mapping for CE's 0..5.
1783 	 * CE's 6 & 7 do not use interrupts at all.
1784 	 *
1785 	 * This mapping must be kept in sync with the mapping
1786 	 * used by firmware.
1787 	 */
1788 	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
1789 	return IRQ_HANDLED;
1790 }
1791 
1792 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
1793 {
1794 	struct ath10k *ar = arg;
1795 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1796 
1797 	tasklet_schedule(&ar_pci->msi_fw_err);
1798 	return IRQ_HANDLED;
1799 }
1800 
1801 /*
1802  * Top-level interrupt handler for all PCI interrupts from a Target.
1803  * When a block of MSI interrupts is allocated, this top-level handler
1804  * is not used; instead, we directly call the correct sub-handler.
1805  */
1806 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
1807 {
1808 	struct ath10k *ar = arg;
1809 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1810 
1811 	if (ar_pci->num_msi_intrs == 0) {
1812 		/*
1813 		 * IMPORTANT: INTR_CLR register has to be set after
1814 		 * INTR_ENABLE is set to 0, otherwise the interrupt cannot
1815 		 * really be cleared.
1816 		 */
1817 		iowrite32(0, ar_pci->mem +
1818 			  (SOC_CORE_BASE_ADDRESS |
1819 			   PCIE_INTR_ENABLE_ADDRESS));
1820 		iowrite32(PCIE_INTR_FIRMWARE_MASK |
1821 			  PCIE_INTR_CE_MASK_ALL,
1822 			  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
1823 					 PCIE_INTR_CLR_ADDRESS));
1824 		/*
1825 		 * IMPORTANT: this extra read transaction is required to
1826 		 * flush the posted write buffer.
1827 		 */
1828 		(void) ioread32(ar_pci->mem +
1829 				(SOC_CORE_BASE_ADDRESS |
1830 				 PCIE_INTR_ENABLE_ADDRESS));
1831 	}
1832 
1833 	tasklet_schedule(&ar_pci->intr_tq);
1834 
1835 	return IRQ_HANDLED;
1836 }
1837 
1838 static void ath10k_pci_tasklet(unsigned long data)
1839 {
1840 	struct ath10k *ar = (struct ath10k *)data;
1841 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1842 
1843 	ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
1844 	ath10k_ce_per_engine_service_any(ar);
1845 
1846 	if (ar_pci->num_msi_intrs == 0) {
1847 		/* Enable Legacy PCI line interrupts */
1848 		iowrite32(PCIE_INTR_FIRMWARE_MASK |
1849 			  PCIE_INTR_CE_MASK_ALL,
1850 			  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
1851 					 PCIE_INTR_ENABLE_ADDRESS));
1852 		/*
1853 		 * IMPORTANT: this extra read transaction is required to
1854 		 * flush the posted write buffer
1855 		 */
1856 		(void) ioread32(ar_pci->mem +
1857 				(SOC_CORE_BASE_ADDRESS |
1858 				 PCIE_INTR_ENABLE_ADDRESS));
1859 	}
1860 }
1861 
1862 static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
1863 {
1864 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1865 	int ret;
1866 	int i;
1867 
1868 	ret = pci_enable_msi_block(ar_pci->pdev, num);
1869 	if (ret)
1870 		return ret;
1871 
1872 	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
1873 			  ath10k_pci_msi_fw_handler,
1874 			  IRQF_SHARED, "ath10k_pci", ar);
1875 	if (ret)
1876 		return ret;
1877 
1878 	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
1879 		ret = request_irq(ar_pci->pdev->irq + i,
1880 				  ath10k_pci_per_engine_handler,
1881 				  IRQF_SHARED, "ath10k_pci", ar);
1882 		if (ret) {
1883 			ath10k_warn("request_irq(%d) failed %d\n",
1884 				    ar_pci->pdev->irq + i, ret);
1885 
1886 			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
1887 				free_irq(ar_pci->pdev->irq + i, ar);
1888 
1889 			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
1890 			pci_disable_msi(ar_pci->pdev);
1891 			return ret;
1892 		}
1893 	}
1894 
1895 	ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
1896 	return 0;
1897 }
1898 
1899 static int ath10k_pci_start_intr_msi(struct ath10k *ar)
1900 {
1901 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1902 	int ret;
1903 
1904 	ret = pci_enable_msi(ar_pci->pdev);
1905 	if (ret < 0)
1906 		return ret;
1907 
1908 	ret = request_irq(ar_pci->pdev->irq,
1909 			  ath10k_pci_interrupt_handler,
1910 			  IRQF_SHARED, "ath10k_pci", ar);
1911 	if (ret < 0) {
1912 		pci_disable_msi(ar_pci->pdev);
1913 		return ret;
1914 	}
1915 
1916 	ath10k_info("MSI interrupt handling\n");
1917 	return 0;
1918 }
1919 
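/*
 * Legacy (INTx) interrupts: wake the target, enable the firmware and
 * copy engine interrupt sources, then allow the target to sleep again.
 */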
1920 static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
1921 {
1922 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1923 	int ret;
1924 
1925 	ret = request_irq(ar_pci->pdev->irq,
1926 			  ath10k_pci_interrupt_handler,
1927 			  IRQF_SHARED, "ath10k_pci", ar);
1928 	if (ret < 0)
1929 		return ret;
1930 
1931 	/*
1932 	 * Make sure to wake the Target before enabling Legacy
1933 	 * Interrupt.
1934 	 */
1935 	iowrite32(PCIE_SOC_WAKE_V_MASK,
1936 		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
1937 		  PCIE_SOC_WAKE_ADDRESS);
1938 
1939 	ath10k_pci_wait(ar);
1940 
1941 	/*
1942 	 * A potential race occurs here: The CORE_BASE write
1943 	 * depends on target correctly decoding AXI address but
1944 	 * host won't know when target writes BAR to CORE_CTRL.
1945 	 * This write might get lost if target has NOT written BAR.
1946 	 * For now, fix the race by repeating the write in the
1947 	 * synchronization check below.
1948 	 */
1949 	iowrite32(PCIE_INTR_FIRMWARE_MASK |
1950 		  PCIE_INTR_CE_MASK_ALL,
1951 		  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
1952 				 PCIE_INTR_ENABLE_ADDRESS));
1953 	iowrite32(PCIE_SOC_WAKE_RESET,
1954 		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
1955 		  PCIE_SOC_WAKE_ADDRESS);
1956 
1957 	ath10k_info("legacy interrupt handling\n");
1958 	return 0;
1959 }
1960 
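/*
 * Set up interrupt delivery, trying multi-MSI first, then a single MSI,
 * then legacy interrupts. num_msi_intrs records the chosen mode
 * (0 means legacy).
 */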
1961 static int ath10k_pci_start_intr(struct ath10k *ar)
1962 {
1963 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1964 	int num = MSI_NUM_REQUEST;
1965 	int ret;
1966 	int i;
1967 
1968 	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
1969 	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
1970 		     (unsigned long) ar);
1971 
1972 	for (i = 0; i < CE_COUNT; i++) {
1973 		ar_pci->pipe_info[i].ar_pci = ar_pci;
1974 		tasklet_init(&ar_pci->pipe_info[i].intr,
1975 			     ath10k_pci_ce_tasklet,
1976 			     (unsigned long)&ar_pci->pipe_info[i]);
1977 	}
1978 
1979 	if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
1980 		num = 1;
1981 
1982 	if (num > 1) {
1983 		ret = ath10k_pci_start_intr_msix(ar, num);
1984 		if (ret == 0)
1985 			goto exit;
1986 
1987 		ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
1988 		num = 1;
1989 	}
1990 
1991 	if (num == 1) {
1992 		ret = ath10k_pci_start_intr_msi(ar);
1993 		if (ret == 0)
1994 			goto exit;
1995 
1996 		ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
1997 			    ret);
1998 		num = 0;
1999 	}
2000 
2001 	ret = ath10k_pci_start_intr_legacy(ar);
2002 
2003 exit:
2004 	ar_pci->num_msi_intrs = num;
2005 	ar_pci->ce_count = CE_COUNT;
2006 	return ret;
2007 }
2008 
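/* Release every requested IRQ and disable MSI if it was enabled. */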
2009 static void ath10k_pci_stop_intr(struct ath10k *ar)
2010 {
2011 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2012 	int i;
2013 
2014 	/* There's at least one interrupt regardless of whether it's
2015 	 * legacy INTR, MSI or MSI-X */
2016 	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2017 		free_irq(ar_pci->pdev->irq + i, ar);
2018 
2019 	if (ar_pci->num_msi_intrs > 0)
2020 		pci_disable_msi(ar_pci->pdev);
2021 }
2022 
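/*
 * Wait (up to ~3 seconds) for the target to report FW_IND_INITIALIZED,
 * re-issuing the interrupt enable write in legacy mode to work around
 * the CORE_BASE race described above.
 */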
2023 static int ath10k_pci_reset_target(struct ath10k *ar)
2024 {
2025 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2026 	int wait_limit = 300; /* 3 sec */
2027 
2028 	/* Wait for Target to finish initialization before we proceed. */
2029 	iowrite32(PCIE_SOC_WAKE_V_MASK,
2030 		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2031 		  PCIE_SOC_WAKE_ADDRESS);
2032 
2033 	ath10k_pci_wait(ar);
2034 
2035 	while (wait_limit-- &&
2036 	       !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2037 		 FW_IND_INITIALIZED)) {
2038 		if (ar_pci->num_msi_intrs == 0)
2039 			/* Fix potential race by repeating CORE_BASE writes */
2040 			iowrite32(PCIE_INTR_FIRMWARE_MASK |
2041 				  PCIE_INTR_CE_MASK_ALL,
2042 				  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2043 						 PCIE_INTR_ENABLE_ADDRESS));
2044 		mdelay(10);
2045 	}
2046 
2047 	if (wait_limit < 0) {
2048 		ath10k_err("Target stalled\n");
2049 		iowrite32(PCIE_SOC_WAKE_RESET,
2050 			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2051 			  PCIE_SOC_WAKE_ADDRESS);
2052 		return -EIO;
2053 	}
2054 
2055 	iowrite32(PCIE_SOC_WAKE_RESET,
2056 		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2057 		  PCIE_SOC_WAKE_ADDRESS);
2058 
2059 	return 0;
2060 }
2061 
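/*
 * Cold reset: wake the target, assert SOC_GLOBAL_RESET, wait for
 * RTC_STATE to indicate cold reset, then de-assert and wait for the
 * target to come back out of reset.
 */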
2062 static void ath10k_pci_device_reset(struct ath10k_pci *ar_pci)
2063 {
2064 	struct ath10k *ar = ar_pci->ar;
2065 	void __iomem *mem = ar_pci->mem;
2066 	int i;
2067 	u32 val;
2068 
2069 	if (!SOC_GLOBAL_RESET_ADDRESS)
2070 		return;
2071 
2072 	if (!mem)
2073 		return;
2074 
2075 	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
2076 			       PCIE_SOC_WAKE_V_MASK);
2077 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2078 		if (ath10k_pci_target_is_awake(ar))
2079 			break;
2080 		msleep(1);
2081 	}
2082 
2083 	/* Put Target, including PCIe, into RESET. */
2084 	val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
2085 	val |= 1;
2086 	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
2087 
2088 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2089 		if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
2090 					  RTC_STATE_COLD_RESET_MASK)
2091 			break;
2092 		msleep(1);
2093 	}
2094 
2095 	/* Pull Target, including PCIe, out of RESET. */
2096 	val &= ~1;
2097 	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
2098 
2099 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2100 		if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
2101 					    RTC_STATE_COLD_RESET_MASK))
2102 			break;
2103 		msleep(1);
2104 	}
2105 
2106 	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2107 }
2108 
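/* Log the feature flags set for this device. */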
2109 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2110 {
2111 	int i;
2112 
2113 	for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2114 		if (!test_bit(i, ar_pci->features))
2115 			continue;
2116 
2117 		switch (i) {
2118 		case ATH10K_PCI_FEATURE_MSI_X:
2119 			ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
2120 			break;
2121 		case ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND:
2122 			ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
2123 			break;
2124 		}
2125 	}
2126 }
2127 
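/*
 * Probe: map the device registers, set 32-bit DMA masks, start
 * interrupt handling, reset the target and wait for firmware
 * initialization, set up the copy engines and register with the
 * ath10k core.
 */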
2128 static int ath10k_pci_probe(struct pci_dev *pdev,
2129 			    const struct pci_device_id *pci_dev)
2130 {
2131 	void __iomem *mem;
2132 	int ret = 0;
2133 	struct ath10k *ar;
2134 	struct ath10k_pci *ar_pci;
2135 	u32 lcr_val;
2136 
2137 	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2138 
2139 	ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2140 	if (ar_pci == NULL)
2141 		return -ENOMEM;
2142 
2143 	ar_pci->pdev = pdev;
2144 	ar_pci->dev = &pdev->dev;
2145 
2146 	switch (pci_dev->device) {
2147 	case QCA988X_1_0_DEVICE_ID:
2148 		set_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features);
2149 		break;
2150 	case QCA988X_2_0_DEVICE_ID:
2151 		set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2152 		break;
2153 	default:
2154 		ret = -ENODEV;
2155 		ath10k_err("Unknown device ID: %d\n", pci_dev->device);
2156 		goto err_ar_pci;
2157 	}
2158 
2159 	ath10k_pci_dump_features(ar_pci);
2160 
2161 	ar = ath10k_core_create(ar_pci, ar_pci->dev, ATH10K_BUS_PCI,
2162 				&ath10k_pci_hif_ops);
2163 	if (!ar) {
2164 		ath10k_err("ath10k_core_create failed!\n");
2165 		ret = -EINVAL;
2166 		goto err_ar_pci;
2167 	}
2168 
2169 	/* Enable QCA988X_1.0 HW workarounds */
2170 	if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features))
2171 		spin_lock_init(&ar_pci->hw_v1_workaround_lock);
2172 
2173 	ar_pci->ar = ar;
2174 	ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2175 	atomic_set(&ar_pci->keep_awake_count, 0);
2176 
2177 	pci_set_drvdata(pdev, ar);
2178 
2179 	/*
2180 	 * Without the Host's knowledge, the Target may have been reset or
2181 	 * power cycled and its Config Space may no longer reflect the PCI
2182 	 * address space that was assigned earlier by the PCI infrastructure.
2183 	 * Refresh it now.
2184 	 */
2185 	ret = pci_assign_resource(pdev, BAR_NUM);
2186 	if (ret) {
2187 		ath10k_err("cannot assign PCI space: %d\n", ret);
2188 		goto err_ar;
2189 	}
2190 
2191 	ret = pci_enable_device(pdev);
2192 	if (ret) {
2193 		ath10k_err("cannot enable PCI device: %d\n", ret);
2194 		goto err_ar;
2195 	}
2196 
2197 	/* Request MMIO resources */
2198 	ret = pci_request_region(pdev, BAR_NUM, "ath");
2199 	if (ret) {
2200 		ath10k_err("PCI MMIO reservation error: %d\n", ret);
2201 		goto err_device;
2202 	}
2203 
2204 	/*
2205 	 * Target structures are limited to 32-bit DMA pointers.
2206 	 * DMA pointers can be wider than 32 bits by default on some systems.
2207 	 */
2208 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2209 	if (ret) {
2210 		ath10k_err("32-bit DMA not available: %d\n", ret);
2211 		goto err_region;
2212 	}
2213 
2214 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2215 	if (ret) {
2216 		ath10k_err("cannot enable 32-bit consistent DMA\n");
2217 		goto err_region;
2218 	}
2219 
2220 	/* Set bus master bit in PCI_COMMAND to enable DMA */
2221 	pci_set_master(pdev);
2222 
2223 	/*
2224 	 * Temporary FIX: disable ASPM
2225 	 * Will be removed after the OTP is programmed
2226 	 */
2227 	pci_read_config_dword(pdev, 0x80, &lcr_val);
2228 	pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2229 
2230 	/* Arrange for access to Target SoC registers. */
2231 	mem = pci_iomap(pdev, BAR_NUM, 0);
2232 	if (!mem) {
2233 		ath10k_err("PCI iomap error\n");
2234 		ret = -EIO;
2235 		goto err_master;
2236 	}
2237 
2238 	ar_pci->mem = mem;
2239 
2240 	spin_lock_init(&ar_pci->ce_lock);
2241 
2242 	ar_pci->cacheline_sz = dma_get_cache_alignment();
2243 
2244 	ret = ath10k_pci_start_intr(ar);
2245 	if (ret) {
2246 		ath10k_err("could not start interrupt handling (%d)\n", ret);
2247 		goto err_iomap;
2248 	}
2249 
2250 	/*
2251 	 * Bring the target up cleanly.
2252 	 *
2253 	 * The target may be in an undefined state with an AUX-powered Target
2254 	 * and a Host in WoW mode. If the Host crashes, loses power, or is
2255 	 * restarted (without unloading the driver) then the Target is left
2256 	 * (aux) powered and running. On a subsequent driver load, the Target
2257 	 * is in an unexpected state. We try to catch that here in order to
2258 	 * reset the Target and retry the probe.
2259 	 */
2260 	ath10k_pci_device_reset(ar_pci);
2261 
2262 	ret = ath10k_pci_reset_target(ar);
2263 	if (ret)
2264 		goto err_intr;
2265 
2266 	if (ath10k_target_ps) {
2267 		ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n");
2268 	} else {
2269 		/* Force AWAKE forever */
2270 		ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n");
2271 		ath10k_do_pci_wake(ar);
2272 	}
2273 
2274 	ret = ath10k_pci_ce_init(ar);
2275 	if (ret)
2276 		goto err_intr;
2277 
2278 	ret = ath10k_pci_init_config(ar);
2279 	if (ret)
2280 		goto err_ce;
2281 
2282 	ret = ath10k_pci_wake_target_cpu(ar);
2283 	if (ret) {
2284 		ath10k_err("could not wake up target CPU (%d)\n", ret);
2285 		goto err_ce;
2286 	}
2287 
2288 	ret = ath10k_core_register(ar);
2289 	if (ret) {
2290 		ath10k_err("could not register driver core (%d)\n", ret);
2291 		goto err_ce;
2292 	}
2293 
2294 	return 0;
2295 
2296 err_ce:
2297 	ath10k_pci_ce_deinit(ar);
2298 err_intr:
2299 	ath10k_pci_stop_intr(ar);
2300 err_iomap:
2301 	pci_iounmap(pdev, mem);
2302 err_master:
2303 	pci_clear_master(pdev);
2304 err_region:
2305 	pci_release_region(pdev, BAR_NUM);
2306 err_device:
2307 	pci_disable_device(pdev);
2308 err_ar:
2309 	pci_set_drvdata(pdev, NULL);
2310 	ath10k_core_destroy(ar);
2311 err_ar_pci:
2312 	/* call HIF PCI free here */
2313 	kfree(ar_pci);
2314 
2315 	return ret;
2316 }
2317 
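/* Undo everything done in ath10k_pci_probe(), in roughly reverse order. */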
2318 static void ath10k_pci_remove(struct pci_dev *pdev)
2319 {
2320 	struct ath10k *ar = pci_get_drvdata(pdev);
2321 	struct ath10k_pci *ar_pci;
2322 
2323 	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2324 
2325 	if (!ar)
2326 		return;
2327 
2328 	ar_pci = ath10k_pci_priv(ar);
2329 
2330 	if (!ar_pci)
2331 		return;
2332 
2333 	tasklet_kill(&ar_pci->msi_fw_err);
2334 
2335 	ath10k_core_unregister(ar);
2336 	ath10k_pci_stop_intr(ar);
2337 
2338 	pci_set_drvdata(pdev, NULL);
2339 	pci_iounmap(pdev, ar_pci->mem);
2340 	pci_release_region(pdev, BAR_NUM);
2341 	pci_clear_master(pdev);
2342 	pci_disable_device(pdev);
2343 
2344 	ath10k_core_destroy(ar);
2345 	kfree(ar_pci);
2346 }
2347 
2348 #if defined(CONFIG_PM_SLEEP)
2349 
2350 #define ATH10K_PCI_PM_CONTROL 0x44
2351 
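/*
 * Suspend: ask the target to pause and wait (up to 1s) for the pause
 * event, then save PCI state, disable the device and program power
 * state D3 via the PM control register.
 */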
2352 static int ath10k_pci_suspend(struct device *device)
2353 {
2354 	struct pci_dev *pdev = to_pci_dev(device);
2355 	struct ath10k *ar = pci_get_drvdata(pdev);
2356 	struct ath10k_pci *ar_pci;
2357 	u32 val;
2358 	int ret, retval;
2359 
2360 	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2361 
2362 	if (!ar)
2363 		return -ENODEV;
2364 
2365 	ar_pci = ath10k_pci_priv(ar);
2366 	if (!ar_pci)
2367 		return -ENODEV;
2368 
2369 	if (ath10k_core_target_suspend(ar))
2370 		return -EBUSY;
2371 
2372 	ret = wait_event_interruptible_timeout(ar->event_queue,
2373 						ar->is_target_paused == true,
2374 						1 * HZ);
2375 	if (ret < 0) {
2376 		ath10k_warn("suspend interrupted (%d)\n", ret);
2377 		retval = ret;
2378 		goto resume;
2379 	} else if (ret == 0) {
2380 		ath10k_warn("suspend timed out - target pause event never came\n");
2381 		retval = -EIO;
2382 		goto resume;
2383 	}
2384 
2385 	/*
2386 	 * Reset is_target_paused so the next suspend waits for the pause
2387 	 * event again. Otherwise the flag stays true, the wait above is
2388 	 * skipped and the target asserts because the host is already
2389 	 * suspended.
2390 	 */
2391 	ar->is_target_paused = false;
2392 
2393 	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2394 
2395 	if ((val & 0x000000ff) != 0x3) {
2396 		pci_save_state(pdev);
2397 		pci_disable_device(pdev);
2398 		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2399 				       (val & 0xffffff00) | 0x03);
2400 	}
2401 
2402 	return 0;
2403 resume:
2404 	ret = ath10k_core_target_resume(ar);
2405 	if (ret)
2406 		ath10k_warn("could not resume (%d)\n", ret);
2407 
2408 	return retval;
2409 }
2410 
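/*
 * Resume: re-enable the PCI device, restore config space and return to
 * power state D0 if needed, re-disable the RETRY_TIMEOUT register and
 * resume the target.
 */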
2411 static int ath10k_pci_resume(struct device *device)
2412 {
2413 	struct pci_dev *pdev = to_pci_dev(device);
2414 	struct ath10k *ar = pci_get_drvdata(pdev);
2415 	struct ath10k_pci *ar_pci;
2416 	int ret;
2417 	u32 val;
2418 
2419 	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2420 
2421 	if (!ar)
2422 		return -ENODEV;
2423 	ar_pci = ath10k_pci_priv(ar);
2424 
2425 	if (!ar_pci)
2426 		return -ENODEV;
2427 
2428 	ret = pci_enable_device(pdev);
2429 	if (ret) {
2430 		ath10k_warn("cannot enable PCI device: %d\n", ret);
2431 		return ret;
2432 	}
2433 
2434 	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2435 
2436 	if ((val & 0x000000ff) != 0) {
2437 		pci_restore_state(pdev);
2438 		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2439 				       val & 0xffffff00);
2440 		/*
2441 		 * Suspend/Resume resets the PCI configuration space,
2442 		 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
2443 		 * to keep PCI Tx retries from interfering with C3 CPU state
2444 		 */
2445 		pci_read_config_dword(pdev, 0x40, &val);
2446 
2447 		if ((val & 0x0000ff00) != 0)
2448 			pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2449 	}
2450 
2451 	ret = ath10k_core_target_resume(ar);
2452 	if (ret)
2453 		ath10k_warn("target resume failed: %d\n", ret);
2454 
2455 	return ret;
2456 }
2457 
2458 static SIMPLE_DEV_PM_OPS(ath10k_dev_pm_ops,
2459 			 ath10k_pci_suspend,
2460 			 ath10k_pci_resume);
2461 
2462 #define ATH10K_PCI_PM_OPS (&ath10k_dev_pm_ops)
2463 
2464 #else
2465 
2466 #define ATH10K_PCI_PM_OPS NULL
2467 
2468 #endif /* CONFIG_PM_SLEEP */
2469 
2470 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2471 
2472 static struct pci_driver ath10k_pci_driver = {
2473 	.name = "ath10k_pci",
2474 	.id_table = ath10k_pci_id_table,
2475 	.probe = ath10k_pci_probe,
2476 	.remove = ath10k_pci_remove,
2477 	.driver.pm = ATH10K_PCI_PM_OPS,
2478 };
2479 
2480 static int __init ath10k_pci_init(void)
2481 {
2482 	int ret;
2483 
2484 	ret = pci_register_driver(&ath10k_pci_driver);
2485 	if (ret)
2486 		ath10k_err("pci_register_driver failed [%d]\n", ret);
2487 
2488 	return ret;
2489 }
2490 module_init(ath10k_pci_init);
2491 
2492 static void __exit ath10k_pci_exit(void)
2493 {
2494 	pci_unregister_driver(&ath10k_pci_driver);
2495 }
2496 
2497 module_exit(ath10k_pci_exit);
2498 
2499 MODULE_AUTHOR("Qualcomm Atheros");
2500 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2501 MODULE_LICENSE("Dual BSD/GPL");
2502 MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE);
2503 MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE);
2504 MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE);
2505 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2506 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2507 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
2508