// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018 Quantenna Communications */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <linux/circ_buf.h>
#include <linux/log2.h>

#include "pcie_priv.h"
#include "pearl_pcie_regs.h"
#include "pearl_pcie_ipc.h"
#include "qtn_hw_ids.h"
#include "core.h"
#include "bus.h"
#include "shm_ipc.h"
#include "debug.h"

static bool use_msi = true;
module_param(use_msi, bool, 0644);
MODULE_PARM_DESC(use_msi, "set 0 to use legacy interrupt");

static unsigned int tx_bd_size_param = 32;
module_param(tx_bd_size_param, uint, 0644);
MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size, power of two");

static unsigned int rx_bd_size_param = 256;
module_param(rx_bd_size_param, uint, 0644);
MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size, power of two");

static u8 flashboot = 1;
module_param(flashboot, byte, 0644);
MODULE_PARM_DESC(flashboot, "set to 0 to use FW binary file on FS");

#define DRV_NAME	"qtnfmac_pearl_pcie"

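/*
 * Boot Data Area (BDA): control block exposed by the card through the
 * EP memory BAR. Host (RC) and endpoint (EP) exchange boot/firmware
 * download state, DMA parameters and shared-memory IPC regions through
 * this structure; multi-byte fields are little-endian.
 */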
struct qtnf_pearl_bda {
	__le16 bda_len;
	__le16 bda_version;
	__le32 bda_pci_endian;
	__le32 bda_ep_state;
	__le32 bda_rc_state;
	__le32 bda_dma_mask;
	__le32 bda_msi_addr;
	__le32 bda_flashsz;
	u8 bda_boardname[PCIE_BDA_NAMELEN];
	__le32 bda_rc_msi_enabled;
	u8 bda_hhbm_list[PCIE_HHBM_MAX_SIZE];
	__le32 bda_dsbw_start_index;
	__le32 bda_dsbw_end_index;
	__le32 bda_dsbw_total_bytes;
	__le32 bda_rc_tx_bd_base;
	__le32 bda_rc_tx_bd_num;
	u8 bda_pcie_mac[QTN_ENET_ADDR_LENGTH];
	struct qtnf_shm_ipc_region bda_shm_reg1 __aligned(4096); /* host TX */
	struct qtnf_shm_ipc_region bda_shm_reg2 __aligned(4096); /* host RX */
} __packed;

struct qtnf_pearl_tx_bd {
	__le32 addr;
	__le32 addr_h;
	__le32 info;
	__le32 info_h;
} __packed;

struct qtnf_pearl_rx_bd {
	__le32 addr;
	__le32 addr_h;
	__le32 info;
	__le32 info_h;
	__le32 next_ptr;
	__le32 next_ptr_h;
} __packed;

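/*
 * Header prepended to each firmware image chunk pushed to the EP over
 * the data path during host-assisted boot; see qtnf_ep_fw_send() below.
 */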
struct qtnf_pearl_fw_hdr {
	u8 boardflg[8];
	__le32 fwsize;
	__le32 seqnum;
	__le32 type;
	__le32 pktlen;
	__le32 crc;
} __packed;

struct qtnf_pcie_pearl_state {
	struct qtnf_pcie_bus_priv base;

	/* lock for irq configuration changes */
	spinlock_t irq_lock;

	struct qtnf_pearl_bda __iomem *bda;
	void __iomem *pcie_reg_base;

	struct qtnf_pearl_tx_bd *tx_bd_vbase;
	dma_addr_t tx_bd_pbase;

	struct qtnf_pearl_rx_bd *rx_bd_vbase;
	dma_addr_t rx_bd_pbase;

	dma_addr_t bd_table_paddr;
	void *bd_table_vaddr;
	u32 bd_table_len;
	u32 pcie_irq_mask;
	u32 pcie_irq_rx_count;
	u32 pcie_irq_tx_count;
	u32 pcie_irq_uf_count;
};

static inline void qtnf_init_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
{
	unsigned long flags;

	spin_lock_irqsave(&ps->irq_lock, flags);
	ps->pcie_irq_mask = (PCIE_HDP_INT_RX_BITS | PCIE_HDP_INT_TX_BITS);
	spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_enable_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
{
	unsigned long flags;

	spin_lock_irqsave(&ps->irq_lock, flags);
	writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
	spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_disable_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
{
	unsigned long flags;

	spin_lock_irqsave(&ps->irq_lock, flags);
	writel(0x0, PCIE_HDP_INT_EN(ps->pcie_reg_base));
	spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_en_rxdone_irq(struct qtnf_pcie_pearl_state *ps)
{
	unsigned long flags;

	spin_lock_irqsave(&ps->irq_lock, flags);
	ps->pcie_irq_mask |= PCIE_HDP_INT_RX_BITS;
	writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
	spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_dis_rxdone_irq(struct qtnf_pcie_pearl_state *ps)
{
	unsigned long flags;

	spin_lock_irqsave(&ps->irq_lock, flags);
	ps->pcie_irq_mask &= ~PCIE_HDP_INT_RX_BITS;
	writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
	spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_en_txdone_irq(struct qtnf_pcie_pearl_state *ps)
{
	unsigned long flags;

	spin_lock_irqsave(&ps->irq_lock, flags);
	ps->pcie_irq_mask |= PCIE_HDP_INT_TX_BITS;
	writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
	spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_dis_txdone_irq(struct qtnf_pcie_pearl_state *ps)
{
	unsigned long flags;

	spin_lock_irqsave(&ps->irq_lock, flags);
	ps->pcie_irq_mask &= ~PCIE_HDP_INT_TX_BITS;
	writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
	spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static void qtnf_deassert_intx(struct qtnf_pcie_pearl_state *ps)
{
	void __iomem *reg = ps->base.sysctl_bar + PEARL_PCIE_CFG0_OFFSET;
	u32 cfg;

	cfg = readl(reg);
	cfg &= ~PEARL_ASSERT_INTX;
	qtnf_non_posted_write(cfg, reg);
}

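/*
 * Kick the EP reset IPC interrupt, give the card time to restart, then
 * restore the previously saved PCI config space of the endpoint.
 */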
static void qtnf_pearl_reset_ep(struct qtnf_pcie_pearl_state *ps)
{
	const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_EP_RESET);
	void __iomem *reg = ps->base.sysctl_bar +
			    QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;

	qtnf_non_posted_write(data, reg);
	msleep(QTN_EP_RESET_WAIT_MS);
	pci_restore_state(ps->base.pdev);
}

static void qtnf_pcie_pearl_ipc_gen_ep_int(void *arg)
{
	const struct qtnf_pcie_pearl_state *ps = arg;
	const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_IPC_IRQ);
	void __iomem *reg = ps->base.sysctl_bar +
			    QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;

	qtnf_non_posted_write(data, reg);
}

static int qtnf_is_state(__le32 __iomem *reg, u32 state)
{
	u32 s = readl(reg);

	return s & state;
}

static void qtnf_set_state(__le32 __iomem *reg, u32 state)
{
	u32 s = readl(reg);

	qtnf_non_posted_write(state | s, reg);
}

static void qtnf_clear_state(__le32 __iomem *reg, u32 state)
{
	u32 s = readl(reg);

	qtnf_non_posted_write(s & ~state, reg);
}

static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms)
{
	u32 timeout = 0;

	while (qtnf_is_state(reg, state) == 0) {
		usleep_range(1000, 1200);
		if (++timeout > delay_in_ms)
			return -1;
	}

	return 0;
}

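/*
 * Allocate a single coherent DMA region holding the TX and RX descriptor
 * rings back to back, then program the RX ring base address and size
 * into the HDP registers.
 */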
static int pearl_alloc_bd_table(struct qtnf_pcie_pearl_state *ps)
{
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	dma_addr_t paddr;
	void *vaddr;
	int len;

	len = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd) +
		priv->rx_bd_num * sizeof(struct qtnf_pearl_rx_bd);

	vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	/* tx bd */

	memset(vaddr, 0, len);

	ps->bd_table_vaddr = vaddr;
	ps->bd_table_paddr = paddr;
	ps->bd_table_len = len;

	ps->tx_bd_vbase = vaddr;
	ps->tx_bd_pbase = paddr;

	pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

	priv->tx_bd_r_index = 0;
	priv->tx_bd_w_index = 0;

	/* rx bd */

	vaddr = ((struct qtnf_pearl_tx_bd *)vaddr) + priv->tx_bd_num;
	paddr += priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd);

	ps->rx_bd_vbase = vaddr;
	ps->rx_bd_pbase = paddr;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	writel(QTN_HOST_HI32(paddr),
	       PCIE_HDP_TX_HOST_Q_BASE_H(ps->pcie_reg_base));
#endif
	writel(QTN_HOST_LO32(paddr),
	       PCIE_HDP_TX_HOST_Q_BASE_L(ps->pcie_reg_base));
	writel(priv->rx_bd_num | (sizeof(struct qtnf_pearl_rx_bd)) << 16,
	       PCIE_HDP_TX_HOST_Q_SZ_CTRL(ps->pcie_reg_base));

	pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

	return 0;
}

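/*
 * Attach a freshly allocated skb to RX descriptor 'index': map it for
 * device-to-host DMA, record the bus address in the descriptor and hand
 * the buffer pointer to the HHBM/HDP hardware.
 */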
static int pearl_skb2rbd_attach(struct qtnf_pcie_pearl_state *ps, u16 index)
{
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	struct qtnf_pearl_rx_bd *rxbd;
	struct sk_buff *skb;
	dma_addr_t paddr;

	skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC);
	if (!skb) {
		priv->rx_skb[index] = NULL;
		return -ENOMEM;
	}

	priv->rx_skb[index] = skb;
	rxbd = &ps->rx_bd_vbase[index];

	paddr = pci_map_single(priv->pdev, skb->data,
			       SKB_BUF_SIZE, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(priv->pdev, paddr)) {
		pr_err("skb DMA mapping error: %pad\n", &paddr);
		return -ENOMEM;
	}

	/* keep rx skb paddrs in rx buffer descriptors for cleanup purposes */
	rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr));
	rxbd->addr_h = cpu_to_le32(QTN_HOST_HI32(paddr));
	rxbd->info = 0x0;

	priv->rx_bd_w_index = index;

	/* sync up all descriptor updates */
	wmb();

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	writel(QTN_HOST_HI32(paddr),
	       PCIE_HDP_HHBM_BUF_PTR_H(ps->pcie_reg_base));
#endif
	writel(QTN_HOST_LO32(paddr),
	       PCIE_HDP_HHBM_BUF_PTR(ps->pcie_reg_base));

	writel(index, PCIE_HDP_TX_HOST_Q_WR_PTR(ps->pcie_reg_base));
	return 0;
}

static int pearl_alloc_rx_buffers(struct qtnf_pcie_pearl_state *ps)
{
	u16 i;
	int ret = 0;

	memset(ps->rx_bd_vbase, 0x0,
	       ps->base.rx_bd_num * sizeof(struct qtnf_pearl_rx_bd));

	for (i = 0; i < ps->base.rx_bd_num; i++) {
		ret = pearl_skb2rbd_attach(ps, i);
		if (ret)
			break;
	}

	return ret;
}

/* all rx/tx activity should have ceased before calling this function */
static void qtnf_pearl_free_xfer_buffers(struct qtnf_pcie_pearl_state *ps)
{
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	struct qtnf_pearl_tx_bd *txbd;
	struct qtnf_pearl_rx_bd *rxbd;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int i;

	/* free rx buffers */
	for (i = 0; i < priv->rx_bd_num; i++) {
		if (priv->rx_skb && priv->rx_skb[i]) {
			rxbd = &ps->rx_bd_vbase[i];
			skb = priv->rx_skb[i];
			paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
					      le32_to_cpu(rxbd->addr));
			pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			priv->rx_skb[i] = NULL;
		}
	}

	/* free tx buffers */
	for (i = 0; i < priv->tx_bd_num; i++) {
		if (priv->tx_skb && priv->tx_skb[i]) {
			txbd = &ps->tx_bd_vbase[i];
			skb = priv->tx_skb[i];
			paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
					      le32_to_cpu(txbd->addr));
			pci_unmap_single(priv->pdev, paddr, skb->len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb_any(skb);
			priv->tx_skb[i] = NULL;
		}
	}
}

static int pearl_hhbm_init(struct qtnf_pcie_pearl_state *ps)
{
	u32 val;

	val = readl(PCIE_HHBM_CONFIG(ps->pcie_reg_base));
	val |= HHBM_CONFIG_SOFT_RESET;
	writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
	usleep_range(50, 100);
	val &= ~HHBM_CONFIG_SOFT_RESET;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	val |= HHBM_64BIT;
#endif
	writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
	writel(ps->base.rx_bd_num, PCIE_HHBM_Q_LIMIT_REG(ps->pcie_reg_base));

	return 0;
}

static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps)
{
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	int ret;
	u32 val;

	priv->tx_bd_num = tx_bd_size_param;
	priv->rx_bd_num = rx_bd_size_param;
	priv->rx_bd_w_index = 0;
	priv->rx_bd_r_index = 0;

	if (!priv->tx_bd_num || !is_power_of_2(priv->tx_bd_num)) {
		pr_err("tx_bd_size_param %u is not power of two\n",
		       priv->tx_bd_num);
		return -EINVAL;
	}

	val = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd);
	if (val > PCIE_HHBM_MAX_SIZE) {
		pr_err("tx_bd_size_param %u is too large\n",
		       priv->tx_bd_num);
		return -EINVAL;
	}

	if (!priv->rx_bd_num || !is_power_of_2(priv->rx_bd_num)) {
		pr_err("rx_bd_size_param %u is not power of two\n",
		       priv->rx_bd_num);
		return -EINVAL;
	}

	val = priv->rx_bd_num * sizeof(dma_addr_t);
	if (val > PCIE_HHBM_MAX_SIZE) {
		pr_err("rx_bd_size_param %u is too large\n",
		       priv->rx_bd_num);
		return -EINVAL;
	}

	ret = pearl_hhbm_init(ps);
	if (ret) {
		pr_err("failed to init h/w queues\n");
		return ret;
	}

	ret = qtnf_pcie_alloc_skb_array(priv);
	if (ret) {
		pr_err("failed to allocate skb array\n");
		return ret;
	}

	ret = pearl_alloc_bd_table(ps);
	if (ret) {
		pr_err("failed to allocate bd table\n");
		return ret;
	}

	ret = pearl_alloc_rx_buffers(ps);
	if (ret) {
		pr_err("failed to allocate rx buffers\n");
		return ret;
	}

	return ret;
}

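/*
 * Walk the TX ring from the last reclaimed position up to the index the
 * hardware reports as completed, unmapping and freeing transmitted skbs
 * and waking netdev queues that were stopped on a full ring.
 */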
static void qtnf_pearl_data_tx_reclaim(struct qtnf_pcie_pearl_state *ps)
{
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	struct qtnf_pearl_tx_bd *txbd;
	struct sk_buff *skb;
	unsigned long flags;
	dma_addr_t paddr;
	u32 tx_done_index;
	int count = 0;
	int i;

	spin_lock_irqsave(&priv->tx_reclaim_lock, flags);

	tx_done_index = readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base))
			& (priv->tx_bd_num - 1);

	i = priv->tx_bd_r_index;

	while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) {
		skb = priv->tx_skb[i];
		if (likely(skb)) {
			txbd = &ps->tx_bd_vbase[i];
			paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
					      le32_to_cpu(txbd->addr));
			pci_unmap_single(priv->pdev, paddr, skb->len,
					 PCI_DMA_TODEVICE);

			if (skb->dev) {
				qtnf_update_tx_stats(skb->dev, skb);
				if (unlikely(priv->tx_stopped)) {
					qtnf_wake_all_queues(skb->dev);
					priv->tx_stopped = 0;
				}
			}

			dev_kfree_skb_any(skb);
		}

		priv->tx_skb[i] = NULL;
		count++;

		if (++i >= priv->tx_bd_num)
			i = 0;
	}

	priv->tx_reclaim_done += count;
	priv->tx_reclaim_req++;
	priv->tx_bd_r_index = i;

	spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags);
}

static int qtnf_tx_queue_ready(struct qtnf_pcie_pearl_state *ps)
{
	struct qtnf_pcie_bus_priv *priv = &ps->base;

	if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
			priv->tx_bd_num)) {
		qtnf_pearl_data_tx_reclaim(ps);

		if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
				priv->tx_bd_num)) {
			pr_warn_ratelimited("reclaim full Tx queue\n");
			priv->tx_full_count++;
			return 0;
		}
	}

	return 1;
}

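/*
 * Data path TX: map the skb for host-to-device DMA, fill the next TX
 * descriptor and write the descriptor's bus address to the HDP so the
 * EP can fetch it. Returns NETDEV_TX_BUSY when the ring is full.
 */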
static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
{
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	dma_addr_t txbd_paddr, skb_paddr;
	struct qtnf_pearl_tx_bd *txbd;
	unsigned long flags;
	int len, i;
	u32 info;
	int ret = 0;

	spin_lock_irqsave(&priv->tx_lock, flags);

	if (!qtnf_tx_queue_ready(ps)) {
		if (skb->dev) {
			netif_tx_stop_all_queues(skb->dev);
			priv->tx_stopped = 1;
		}

		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	i = priv->tx_bd_w_index;
	priv->tx_skb[i] = skb;
	len = skb->len;

	skb_paddr = pci_map_single(priv->pdev, skb->data,
				   skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(priv->pdev, skb_paddr)) {
		pr_err("skb DMA mapping error: %pad\n", &skb_paddr);
		ret = -ENOMEM;
		goto tx_done;
	}

	txbd = &ps->tx_bd_vbase[i];
	txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr));
	txbd->addr_h = cpu_to_le32(QTN_HOST_HI32(skb_paddr));

	info = (len & QTN_PCIE_TX_DESC_LEN_MASK) << QTN_PCIE_TX_DESC_LEN_SHIFT;
	txbd->info = cpu_to_le32(info);

	/* sync up all descriptor updates before passing them to EP */
	dma_wmb();

	/* write new TX descriptor to PCIE_RX_FIFO on EP */
	txbd_paddr = ps->tx_bd_pbase + i * sizeof(struct qtnf_pearl_tx_bd);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	writel(QTN_HOST_HI32(txbd_paddr),
	       PCIE_HDP_HOST_WR_DESC0_H(ps->pcie_reg_base));
#endif
	writel(QTN_HOST_LO32(txbd_paddr),
	       PCIE_HDP_HOST_WR_DESC0(ps->pcie_reg_base));

	if (++i >= priv->tx_bd_num)
		i = 0;

	priv->tx_bd_w_index = i;

tx_done:
	if (ret && skb) {
		pr_err_ratelimited("drop skb\n");
		if (skb->dev)
			skb->dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	priv->tx_done_count++;
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	qtnf_pearl_data_tx_reclaim(ps);

	return NETDEV_TX_OK;
}

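/*
 * Top-half interrupt handler: dispatch shared-memory IPC handlers,
 * account RX/TX/HHBM underflow causes, then defer RX processing to NAPI
 * and TX reclaim to a tasklet with the corresponding HDP interrupts
 * masked.
 */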
static irqreturn_t qtnf_pcie_pearl_interrupt(int irq, void *data)
{
	struct qtnf_bus *bus = (struct qtnf_bus *)data;
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	u32 status;

	priv->pcie_irq_count++;
	status = readl(PCIE_HDP_INT_STATUS(ps->pcie_reg_base));

	qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
	qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out);

	if (!(status & ps->pcie_irq_mask))
		goto irq_done;

	if (status & PCIE_HDP_INT_RX_BITS)
		ps->pcie_irq_rx_count++;

	if (status & PCIE_HDP_INT_TX_BITS)
		ps->pcie_irq_tx_count++;

	if (status & PCIE_HDP_INT_HHBM_UF)
		ps->pcie_irq_uf_count++;

	if (status & PCIE_HDP_INT_RX_BITS) {
		qtnf_dis_rxdone_irq(ps);
		napi_schedule(&bus->mux_napi);
	}

	if (status & PCIE_HDP_INT_TX_BITS) {
		qtnf_dis_txdone_irq(ps);
		tasklet_hi_schedule(&priv->reclaim_tq);
	}

irq_done:
	/* H/W workaround: clean all bits, not only enabled */
	qtnf_non_posted_write(~0U, PCIE_HDP_INT_STATUS(ps->pcie_reg_base));

	if (!priv->msi_enabled)
		qtnf_deassert_intx(ps);

	return IRQ_HANDLED;
}

static int qtnf_rx_data_ready(struct qtnf_pcie_pearl_state *ps)
{
	u16 index = ps->base.rx_bd_r_index;
	struct qtnf_pearl_rx_bd *rxbd;
	u32 descw;

	rxbd = &ps->rx_bd_vbase[index];
	descw = le32_to_cpu(rxbd->info);

	if (descw & QTN_TXDONE_MASK)
		return 1;

	return 0;
}

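/*
 * NAPI poll: consume completed RX descriptors, hand packets to the
 * matching net_device via qtnf_classify_skb(), refill the ring with new
 * buffers and re-enable the RX done interrupt once the budget is not
 * exhausted.
 */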
static int qtnf_pcie_pearl_rx_poll(struct napi_struct *napi, int budget)
{
	struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi);
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	struct net_device *ndev = NULL;
	struct sk_buff *skb = NULL;
	int processed = 0;
	struct qtnf_pearl_rx_bd *rxbd;
	dma_addr_t skb_paddr;
	int consume;
	u32 descw;
	u32 psize;
	u16 r_idx;
	u16 w_idx;
	int ret;

	while (processed < budget) {
		if (!qtnf_rx_data_ready(ps))
			goto rx_out;

		r_idx = priv->rx_bd_r_index;
		rxbd = &ps->rx_bd_vbase[r_idx];
		descw = le32_to_cpu(rxbd->info);

		skb = priv->rx_skb[r_idx];
		psize = QTN_GET_LEN(descw);
		consume = 1;

		if (!(descw & QTN_TXDONE_MASK)) {
			pr_warn("skip invalid rxbd[%d]\n", r_idx);
			consume = 0;
		}

		if (!skb) {
			pr_warn("skip missing rx_skb[%d]\n", r_idx);
			consume = 0;
		}

		if (skb && (skb_tailroom(skb) < psize)) {
			pr_err("skip packet with invalid length: %u > %u\n",
			       psize, skb_tailroom(skb));
			consume = 0;
		}

		if (skb) {
			skb_paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
						  le32_to_cpu(rxbd->addr));
			pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE,
					 PCI_DMA_FROMDEVICE);
		}

		if (consume) {
			skb_put(skb, psize);
			ndev = qtnf_classify_skb(bus, skb);
			if (likely(ndev)) {
				qtnf_update_rx_stats(ndev, skb);
				skb->protocol = eth_type_trans(skb, ndev);
				napi_gro_receive(napi, skb);
			} else {
				pr_debug("drop untagged skb\n");
				bus->mux_dev.stats.rx_dropped++;
				dev_kfree_skb_any(skb);
			}
		} else {
			if (skb) {
				bus->mux_dev.stats.rx_dropped++;
				dev_kfree_skb_any(skb);
			}
		}

		priv->rx_skb[r_idx] = NULL;
		if (++r_idx >= priv->rx_bd_num)
			r_idx = 0;

		priv->rx_bd_r_index = r_idx;

		/* replace processed buffer by a new one */
		w_idx = priv->rx_bd_w_index;
		while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
				  priv->rx_bd_num) > 0) {
			if (++w_idx >= priv->rx_bd_num)
				w_idx = 0;

			ret = pearl_skb2rbd_attach(ps, w_idx);
			if (ret) {
				pr_err("failed to allocate new rx_skb[%d]\n",
				       w_idx);
				break;
			}
		}

		processed++;
	}

rx_out:
	if (processed < budget) {
		napi_complete(napi);
		qtnf_en_rxdone_irq(ps);
	}

	return processed;
}

static void
qtnf_pcie_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev)
{
	struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);

	tasklet_hi_schedule(&ps->base.reclaim_tq);
}

static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus)
{
	struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);

	qtnf_enable_hdp_irqs(ps);
	napi_enable(&bus->mux_napi);
}

static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
{
	struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);

	napi_disable(&bus->mux_napi);
	qtnf_disable_hdp_irqs(ps);
}

static const struct qtnf_bus_ops qtnf_pcie_pearl_bus_ops = {
	/* control path methods */
	.control_tx	= qtnf_pcie_control_tx,

	/* data path methods */
	.data_tx		= qtnf_pcie_data_tx,
	.data_tx_timeout	= qtnf_pcie_data_tx_timeout,
	.data_rx_start		= qtnf_pcie_data_rx_start,
	.data_rx_stop		= qtnf_pcie_data_rx_stop,
};

static int qtnf_dbg_irq_stats(struct seq_file *s, void *data)
{
	struct qtnf_bus *bus = dev_get_drvdata(s->private);
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
	u32 reg = readl(PCIE_HDP_INT_EN(ps->pcie_reg_base));
	u32 status;

	seq_printf(s, "pcie_irq_count(%u)\n", ps->base.pcie_irq_count);
	seq_printf(s, "pcie_irq_tx_count(%u)\n", ps->pcie_irq_tx_count);
	status = reg & PCIE_HDP_INT_TX_BITS;
	seq_printf(s, "pcie_irq_tx_status(%s)\n",
		   (status == PCIE_HDP_INT_TX_BITS) ? "EN" : "DIS");
	seq_printf(s, "pcie_irq_rx_count(%u)\n", ps->pcie_irq_rx_count);
	status = reg & PCIE_HDP_INT_RX_BITS;
	seq_printf(s, "pcie_irq_rx_status(%s)\n",
		   (status == PCIE_HDP_INT_RX_BITS) ? "EN" : "DIS");
	seq_printf(s, "pcie_irq_uf_count(%u)\n", ps->pcie_irq_uf_count);
	status = reg & PCIE_HDP_INT_HHBM_UF;
	seq_printf(s, "pcie_irq_hhbm_uf_status(%s)\n",
		   (status == PCIE_HDP_INT_HHBM_UF) ? "EN" : "DIS");

	return 0;
}

static int qtnf_dbg_hdp_stats(struct seq_file *s, void *data)
{
	struct qtnf_bus *bus = dev_get_drvdata(s->private);
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ps->base;

	seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count);
	seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
	seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
	seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);

	seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
	seq_printf(s, "tx_bd_p_index(%u)\n",
		   readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base))
			& (priv->tx_bd_num - 1));
	seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index);
	seq_printf(s, "tx queue len(%u)\n",
		   CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index,
			    priv->tx_bd_num));

	seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index);
	seq_printf(s, "rx_bd_p_index(%u)\n",
		   readl(PCIE_HDP_TX0DMA_CNT(ps->pcie_reg_base))
			& (priv->rx_bd_num - 1));
	seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index);
	seq_printf(s, "rx alloc queue len(%u)\n",
		   CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
			      priv->rx_bd_num));

	return 0;
}

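/*
 * Package one firmware image chunk into an skb (header + payload + CRC)
 * and push it to the EP through the regular data TX path. Returns the
 * number of payload bytes queued, or 0/-ENOMEM on failure.
 */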
static int qtnf_ep_fw_send(struct pci_dev *pdev, uint32_t size,
			   int blk, const u8 *pblk, const u8 *fw)
{
	struct qtnf_bus *bus = pci_get_drvdata(pdev);
	struct qtnf_pearl_fw_hdr *hdr;
	u8 *pdata;

	int hds = sizeof(*hdr);
	struct sk_buff *skb = NULL;
	int len = 0;
	int ret;

	skb = __dev_alloc_skb(QTN_PCIE_FW_BUFSZ, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	skb->len = QTN_PCIE_FW_BUFSZ;
	skb->dev = NULL;

	hdr = (struct qtnf_pearl_fw_hdr *)skb->data;
	memcpy(hdr->boardflg, QTN_PCIE_BOARDFLG, strlen(QTN_PCIE_BOARDFLG));
	hdr->fwsize = cpu_to_le32(size);
	hdr->seqnum = cpu_to_le32(blk);

	if (blk)
		hdr->type = cpu_to_le32(QTN_FW_DSUB);
	else
		hdr->type = cpu_to_le32(QTN_FW_DBEGIN);

	pdata = skb->data + hds;

	len = QTN_PCIE_FW_BUFSZ - hds;
	if (pblk >= (fw + size - len)) {
		len = fw + size - pblk;
		hdr->type = cpu_to_le32(QTN_FW_DEND);
	}

	hdr->pktlen = cpu_to_le32(len);
	memcpy(pdata, pblk, len);
	hdr->crc = cpu_to_le32(~crc32(0, pdata, len));

	ret = qtnf_pcie_data_tx(bus, skb);

	return (ret == NETDEV_TX_OK) ? len : 0;
}

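/*
 * Host-assisted firmware download: stream the image in blocks, sync
 * with the EP every QTN_PCIE_FW_DLMASK blocks and rewind the stream
 * when the EP requests a retry of the last window.
 */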
static int
qtnf_ep_fw_load(struct qtnf_pcie_pearl_state *ps, const u8 *fw, u32 fw_size)
{
	int blk_size = QTN_PCIE_FW_BUFSZ - sizeof(struct qtnf_pearl_fw_hdr);
	int blk_count = fw_size / blk_size + ((fw_size % blk_size) ? 1 : 0);
	const u8 *pblk = fw;
	int threshold = 0;
	int blk = 0;
	int len;

	pr_debug("FW upload started: fw_addr=0x%p size=%d\n", fw, fw_size);

	while (blk < blk_count) {
		if (++threshold > 10000) {
			pr_err("FW upload failed: too many retries\n");
			return -ETIMEDOUT;
		}

		len = qtnf_ep_fw_send(ps->base.pdev, fw_size, blk, pblk, fw);
		if (len <= 0)
			continue;

		if (!((blk + 1) & QTN_PCIE_FW_DLMASK) ||
		    (blk == (blk_count - 1))) {
			qtnf_set_state(&ps->bda->bda_rc_state,
				       QTN_RC_FW_SYNC);
			if (qtnf_poll_state(&ps->bda->bda_ep_state,
					    QTN_EP_FW_SYNC,
					    QTN_FW_DL_TIMEOUT_MS)) {
				pr_err("FW upload failed: SYNC timed out\n");
				return -ETIMEDOUT;
			}

			qtnf_clear_state(&ps->bda->bda_ep_state,
					 QTN_EP_FW_SYNC);

			if (qtnf_is_state(&ps->bda->bda_ep_state,
					  QTN_EP_FW_RETRY)) {
				if (blk == (blk_count - 1)) {
					int last_round =
						blk_count & QTN_PCIE_FW_DLMASK;
					blk -= last_round;
					pblk -= ((last_round - 1) *
						blk_size + len);
				} else {
					blk -= QTN_PCIE_FW_DLMASK;
					pblk -= QTN_PCIE_FW_DLMASK * blk_size;
				}

				qtnf_clear_state(&ps->bda->bda_ep_state,
						 QTN_EP_FW_RETRY);

				pr_warn("FW upload retry: block #%d\n", blk);
				continue;
			}

			qtnf_pearl_data_tx_reclaim(ps);
		}

		pblk += len;
		blk++;
	}

	pr_debug("FW upload completed: totally sent %d blocks\n", blk);
	return 0;
}

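/*
 * Deferred firmware bringup: either request flash boot or upload the
 * firmware image from the filesystem, then wait for the EP to report
 * QLINK readiness and register the debugfs counters on success.
 */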
static void qtnf_pearl_fw_work_handler(struct work_struct *work)
{
	struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
	struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);
	struct pci_dev *pdev = ps->base.pdev;
	const struct firmware *fw;
	int ret;
	u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK;
	const char *fwname = QTN_PCI_PEARL_FW_NAME;
	bool fw_boot_success = false;

	if (flashboot) {
		state |= QTN_RC_FW_FLASHBOOT;
	} else {
		ret = request_firmware(&fw, fwname, &pdev->dev);
		if (ret < 0) {
			pr_err("failed to get firmware %s\n", fwname);
			goto fw_load_exit;
		}
	}

	qtnf_set_state(&ps->bda->bda_rc_state, state);

	if (qtnf_poll_state(&ps->bda->bda_ep_state, QTN_EP_FW_LOADRDY,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("card is not ready\n");

		if (!flashboot)
			release_firmware(fw);

		goto fw_load_exit;
	}

	qtnf_clear_state(&ps->bda->bda_ep_state, QTN_EP_FW_LOADRDY);

	if (flashboot) {
		pr_info("booting firmware from flash\n");
	} else {
		pr_info("starting firmware upload: %s\n", fwname);

		ret = qtnf_ep_fw_load(ps, fw->data, fw->size);
		release_firmware(fw);
		if (ret) {
			pr_err("firmware upload error\n");
			goto fw_load_exit;
		}
	}

	if (qtnf_poll_state(&ps->bda->bda_ep_state, QTN_EP_FW_DONE,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("firmware bringup timed out\n");
		goto fw_load_exit;
	}

	pr_info("firmware is up and running\n");

	if (qtnf_poll_state(&ps->bda->bda_ep_state,
			    QTN_EP_FW_QLINK_DONE, QTN_FW_QLINK_TIMEOUT_MS)) {
		pr_err("firmware runtime failure\n");
		goto fw_load_exit;
	}

	fw_boot_success = true;

fw_load_exit:
	qtnf_pcie_fw_boot_done(bus, fw_boot_success, DRV_NAME);

	if (fw_boot_success) {
		qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats);
		qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);
	}
}

static void qtnf_pearl_reclaim_tasklet_fn(unsigned long data)
{
	struct qtnf_pcie_pearl_state *ps = (void *)data;

	qtnf_pearl_data_tx_reclaim(ps);
	qtnf_en_txdone_irq(ps);
}

static int qtnf_pearl_check_chip_id(struct qtnf_pcie_pearl_state *ps)
{
	unsigned int chipid;

	chipid = qtnf_chip_id_get(ps->base.sysctl_bar);

	switch (chipid) {
	case QTN_CHIP_ID_PEARL:
	case QTN_CHIP_ID_PEARL_B:
	case QTN_CHIP_ID_PEARL_C:
		pr_info("chip ID is 0x%x\n", chipid);
		break;
	default:
		pr_err("incorrect chip ID 0x%x\n", chipid);
		return -ENODEV;
	}

	return 0;
}

static int qtnf_pcie_pearl_probe(struct pci_dev *pdev,
				 const struct pci_device_id *id)
{
	struct qtnf_shm_ipc_int ipc_int;
	struct qtnf_pcie_pearl_state *ps;
	struct qtnf_bus *bus;
	int ret;
	u64 dma_mask;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	dma_mask = DMA_BIT_MASK(64);
#else
	dma_mask = DMA_BIT_MASK(32);
#endif

	ret = qtnf_pcie_probe(pdev, sizeof(*ps), &qtnf_pcie_pearl_bus_ops,
			      dma_mask, use_msi);
	if (ret)
		return ret;

	bus = pci_get_drvdata(pdev);
	ps = get_bus_priv(bus);

	spin_lock_init(&ps->irq_lock);

	tasklet_init(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn,
		     (unsigned long)ps);
	netif_napi_add(&bus->mux_dev, &bus->mux_napi,
		       qtnf_pcie_pearl_rx_poll, 10);
	INIT_WORK(&bus->fw_work, qtnf_pearl_fw_work_handler);

	ps->pcie_reg_base = ps->base.dmareg_bar;
	ps->bda = ps->base.epmem_bar;
	writel(ps->base.msi_enabled, &ps->bda->bda_rc_msi_enabled);

	ipc_int.fn = qtnf_pcie_pearl_ipc_gen_ep_int;
	ipc_int.arg = ps;
	qtnf_pcie_init_shm_ipc(&ps->base, &ps->bda->bda_shm_reg1,
			       &ps->bda->bda_shm_reg2, &ipc_int);

	ret = qtnf_pearl_check_chip_id(ps);
	if (ret)
		goto error;

	ret = qtnf_pcie_pearl_init_xfer(ps);
	if (ret) {
		pr_err("PCIE xfer init failed\n");
		goto error;
	}

	/* init default irq settings */
	qtnf_init_hdp_irqs(ps);

	/* start with disabled irqs */
	qtnf_disable_hdp_irqs(ps);

	ret = devm_request_irq(&pdev->dev, pdev->irq,
			       &qtnf_pcie_pearl_interrupt, 0,
			       "qtnf_pcie_irq", (void *)bus);
	if (ret) {
		pr_err("failed to request pcie irq %d\n", pdev->irq);
		goto err_xfer;
	}

	qtnf_pcie_bringup_fw_async(bus);

	return 0;

err_xfer:
	qtnf_pearl_free_xfer_buffers(ps);
error:
	qtnf_pcie_remove(bus, &ps->base);

	return ret;
}

static void qtnf_pcie_pearl_remove(struct pci_dev *pdev)
{
	struct qtnf_pcie_pearl_state *ps;
	struct qtnf_bus *bus;

	bus = pci_get_drvdata(pdev);
	if (!bus)
		return;

	ps = get_bus_priv(bus);

	qtnf_pcie_remove(bus, &ps->base);
	qtnf_pearl_reset_ep(ps);
	qtnf_pearl_free_xfer_buffers(ps);
}

#ifdef CONFIG_PM_SLEEP
static int qtnf_pcie_pearl_suspend(struct device *dev)
{
	return -EOPNOTSUPP;
}

static int qtnf_pcie_pearl_resume(struct device *dev)
{
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_SLEEP
/* Power Management Hooks */
static SIMPLE_DEV_PM_OPS(qtnf_pcie_pearl_pm_ops, qtnf_pcie_pearl_suspend,
			 qtnf_pcie_pearl_resume);
#endif

static const struct pci_device_id qtnf_pcie_devid_table[] = {
	{
		PCIE_VENDOR_ID_QUANTENNA, PCIE_DEVICE_ID_QTN_PEARL,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	},
	{ },
};

MODULE_DEVICE_TABLE(pci, qtnf_pcie_devid_table);

static struct pci_driver qtnf_pcie_pearl_drv_data = {
	.name = DRV_NAME,
	.id_table = qtnf_pcie_devid_table,
	.probe = qtnf_pcie_pearl_probe,
	.remove = qtnf_pcie_pearl_remove,
#ifdef CONFIG_PM_SLEEP
	.driver = {
		.pm = &qtnf_pcie_pearl_pm_ops,
	},
#endif
};

static int __init qtnf_pcie_pearl_register(void)
{
	pr_info("register Quantenna QSR10g FullMAC PCIE driver\n");
	return pci_register_driver(&qtnf_pcie_pearl_drv_data);
}

static void __exit qtnf_pcie_pearl_exit(void)
{
	pr_info("unregister Quantenna QSR10g FullMAC PCIE driver\n");
	pci_unregister_driver(&qtnf_pcie_pearl_drv_data);
}

module_init(qtnf_pcie_pearl_register);
module_exit(qtnf_pcie_pearl_exit);

MODULE_AUTHOR("Quantenna Communications");
MODULE_DESCRIPTION("Quantenna QSR10g PCIe bus driver for 802.11 wireless LAN.");
MODULE_LICENSE("GPL");