// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018 Quantenna Communications */

#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <linux/circ_buf.h>
#include <linux/log2.h>

#include "pcie_priv.h"
#include "pearl_pcie_regs.h"
#include "pearl_pcie_ipc.h"
#include "qtn_hw_ids.h"
#include "core.h"
#include "bus.h"
#include "shm_ipc.h"
#include "debug.h"

#define PEARL_TX_BD_SIZE_DEFAULT	32

struct qtnf_pearl_bda {
	__le16 bda_len;
	__le16 bda_version;
	__le32 bda_pci_endian;
	__le32 bda_ep_state;
	__le32 bda_rc_state;
	__le32 bda_dma_mask;
	__le32 bda_msi_addr;
	__le32 bda_flashsz;
	u8 bda_boardname[PCIE_BDA_NAMELEN];
	__le32 bda_rc_msi_enabled;
	u8 bda_hhbm_list[PCIE_HHBM_MAX_SIZE];
	__le32 bda_dsbw_start_index;
	__le32 bda_dsbw_end_index;
	__le32 bda_dsbw_total_bytes;
	__le32 bda_rc_tx_bd_base;
	__le32 bda_rc_tx_bd_num;
	u8 bda_pcie_mac[QTN_ENET_ADDR_LENGTH];
	struct qtnf_shm_ipc_region bda_shm_reg1 __aligned(4096); /* host TX */
	struct qtnf_shm_ipc_region bda_shm_reg2 __aligned(4096); /* host RX */
} __packed;

struct qtnf_pearl_tx_bd {
	__le32 addr;
	__le32 addr_h;
	__le32 info;
	__le32 info_h;
} __packed;

struct qtnf_pearl_rx_bd {
	__le32 addr;
	__le32 addr_h;
	__le32 info;
	__le32 info_h;
	__le32 next_ptr;
	__le32 next_ptr_h;
} __packed;

struct qtnf_pearl_fw_hdr {
	u8 boardflg[8];
	__le32 fwsize;
	__le32 seqnum;
	__le32 type;
	__le32 pktlen;
	__le32 crc;
} __packed;

struct qtnf_pcie_pearl_state {
	struct qtnf_pcie_bus_priv base;

	/* lock for irq configuration changes */
	spinlock_t irq_lock;

	struct qtnf_pearl_bda __iomem *bda;
	void __iomem *pcie_reg_base;

	struct qtnf_pearl_tx_bd *tx_bd_vbase;
	dma_addr_t tx_bd_pbase;

	struct qtnf_pearl_rx_bd *rx_bd_vbase;
	dma_addr_t rx_bd_pbase;

	dma_addr_t bd_table_paddr;
	void *bd_table_vaddr;
	u32 bd_table_len;
	u32 pcie_irq_mask;
	u32 pcie_irq_rx_count;
	u32 pcie_irq_tx_count;
	u32 pcie_irq_uf_count;
};

static inline void qtnf_init_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
{
	unsigned long flags;

	spin_lock_irqsave(&ps->irq_lock, flags);
	ps->pcie_irq_mask = (PCIE_HDP_INT_RX_BITS | PCIE_HDP_INT_TX_BITS);
	spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_enable_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
{
	unsigned long flags;

	spin_lock_irqsave(&ps->irq_lock, flags);
	writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
	spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_disable_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
{
	unsigned long flags;

	spin_lock_irqsave(&ps->irq_lock, flags);
	writel(0x0, PCIE_HDP_INT_EN(ps->pcie_reg_base));
	spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_en_rxdone_irq(struct qtnf_pcie_pearl_state *ps)
{
	unsigned long flags;

	spin_lock_irqsave(&ps->irq_lock, flags);
	ps->pcie_irq_mask |= PCIE_HDP_INT_RX_BITS;
	writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
	spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_dis_rxdone_irq(struct qtnf_pcie_pearl_state *ps)
{
	unsigned long flags;

	spin_lock_irqsave(&ps->irq_lock, flags);
	ps->pcie_irq_mask &= ~PCIE_HDP_INT_RX_BITS;
	writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
	spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_en_txdone_irq(struct qtnf_pcie_pearl_state *ps)
{
	unsigned long flags;

	spin_lock_irqsave(&ps->irq_lock, flags);
	ps->pcie_irq_mask |= PCIE_HDP_INT_TX_BITS;
	writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
	spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static inline void qtnf_dis_txdone_irq(struct qtnf_pcie_pearl_state *ps)
{
	unsigned long flags;

	spin_lock_irqsave(&ps->irq_lock, flags);
	ps->pcie_irq_mask &= ~PCIE_HDP_INT_TX_BITS;
	writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
	spin_unlock_irqrestore(&ps->irq_lock, flags);
}

static void qtnf_deassert_intx(struct qtnf_pcie_pearl_state *ps)
{
	void __iomem *reg = ps->base.sysctl_bar + PEARL_PCIE_CFG0_OFFSET;
	u32 cfg;

	cfg = readl(reg);
	cfg &= ~PEARL_ASSERT_INTX;
	qtnf_non_posted_write(cfg, reg);
}

static void qtnf_pearl_reset_ep(struct qtnf_pcie_pearl_state *ps)
{
	const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_EP_RESET);
	void __iomem *reg = ps->base.sysctl_bar +
			    QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;

	qtnf_non_posted_write(data, reg);
	msleep(QTN_EP_RESET_WAIT_MS);
	pci_restore_state(ps->base.pdev);
}

static void qtnf_pcie_pearl_ipc_gen_ep_int(void *arg)
{
	const struct qtnf_pcie_pearl_state *ps = arg;
	const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_IPC_IRQ);
	void __iomem *reg = ps->base.sysctl_bar +
			    QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;

	qtnf_non_posted_write(data, reg);
}

static int qtnf_is_state(__le32 __iomem *reg, u32 state)
{
	u32 s = readl(reg);

	return s & state;
}

static void qtnf_set_state(__le32 __iomem *reg, u32 state)
{
	u32 s = readl(reg);

	qtnf_non_posted_write(state | s, reg);
}

static void qtnf_clear_state(__le32 __iomem *reg, u32 state)
{
	u32 s = readl(reg);

	qtnf_non_posted_write(s & ~state, reg);
}

static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms)
{
	u32 timeout = 0;

	while ((qtnf_is_state(reg, state) == 0)) {
		usleep_range(1000, 1200);
		if (++timeout > delay_in_ms)
			return -1;
	}

	return 0;
}

static int pearl_alloc_bd_table(struct qtnf_pcie_pearl_state *ps)
{
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	dma_addr_t paddr;
	void *vaddr;
	int len;

	len = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd) +
		priv->rx_bd_num * sizeof(struct qtnf_pearl_rx_bd);

	vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	/* tx bd */

	memset(vaddr, 0, len);

	ps->bd_table_vaddr = vaddr;
	ps->bd_table_paddr = paddr;
	ps->bd_table_len = len;

	ps->tx_bd_vbase = vaddr;
	ps->tx_bd_pbase = paddr;

	pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

	priv->tx_bd_r_index = 0;
	priv->tx_bd_w_index = 0;

	/* rx bd */

	vaddr = ((struct qtnf_pearl_tx_bd *)vaddr) + priv->tx_bd_num;
	paddr += priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd);

	ps->rx_bd_vbase = vaddr;
	ps->rx_bd_pbase = paddr;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	writel(QTN_HOST_HI32(paddr),
	       PCIE_HDP_TX_HOST_Q_BASE_H(ps->pcie_reg_base));
#endif
	writel(QTN_HOST_LO32(paddr),
	       PCIE_HDP_TX_HOST_Q_BASE_L(ps->pcie_reg_base));
	writel(priv->rx_bd_num | (sizeof(struct qtnf_pearl_rx_bd)) << 16,
	       PCIE_HDP_TX_HOST_Q_SZ_CTRL(ps->pcie_reg_base));

	pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

	return 0;
}

static int pearl_skb2rbd_attach(struct qtnf_pcie_pearl_state *ps, u16 index)
{
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	struct qtnf_pearl_rx_bd *rxbd;
	struct sk_buff *skb;
	dma_addr_t paddr;

	skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC);
	if (!skb) {
		priv->rx_skb[index] = NULL;
		return -ENOMEM;
	}

	priv->rx_skb[index] = skb;
	rxbd = &ps->rx_bd_vbase[index];

	paddr = pci_map_single(priv->pdev, skb->data,
			       SKB_BUF_SIZE, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(priv->pdev, paddr)) {
		pr_err("skb DMA mapping error: %pad\n", &paddr);
		return -ENOMEM;
	}

	/* keep rx skb paddrs in rx buffer descriptors for cleanup purposes */
	rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr));
	rxbd->addr_h = cpu_to_le32(QTN_HOST_HI32(paddr));
	rxbd->info = 0x0;

	priv->rx_bd_w_index = index;

	/* sync up all descriptor updates */
	wmb();

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	writel(QTN_HOST_HI32(paddr),
	       PCIE_HDP_HHBM_BUF_PTR_H(ps->pcie_reg_base));
#endif
	writel(QTN_HOST_LO32(paddr),
	       PCIE_HDP_HHBM_BUF_PTR(ps->pcie_reg_base));

	writel(index, PCIE_HDP_TX_HOST_Q_WR_PTR(ps->pcie_reg_base));
	return 0;
}

static int pearl_alloc_rx_buffers(struct qtnf_pcie_pearl_state *ps)
{
	u16 i;
	int ret = 0;

	memset(ps->rx_bd_vbase, 0x0,
	       ps->base.rx_bd_num * sizeof(struct qtnf_pearl_rx_bd));

	for (i = 0; i < ps->base.rx_bd_num; i++) {
		ret = pearl_skb2rbd_attach(ps, i);
		if (ret)
			break;
	}

	return ret;
}

/* all rx/tx activity should have ceased before calling this function */
static void qtnf_pearl_free_xfer_buffers(struct qtnf_pcie_pearl_state *ps)
{
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	struct qtnf_pearl_tx_bd *txbd;
	struct qtnf_pearl_rx_bd *rxbd;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int i;

	/* free rx buffers */
	for (i = 0; i < priv->rx_bd_num; i++) {
		if (priv->rx_skb && priv->rx_skb[i]) {
			rxbd = &ps->rx_bd_vbase[i];
			skb = priv->rx_skb[i];
			paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
					      le32_to_cpu(rxbd->addr));
			pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			priv->rx_skb[i] = NULL;
		}
	}

	/* free tx buffers */
	for (i = 0; i < priv->tx_bd_num; i++) {
		if (priv->tx_skb && priv->tx_skb[i]) {
			txbd = &ps->tx_bd_vbase[i];
			skb = priv->tx_skb[i];
			paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
					      le32_to_cpu(txbd->addr));
			pci_unmap_single(priv->pdev, paddr, skb->len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb_any(skb);
			priv->tx_skb[i] = NULL;
		}
	}
}

static int pearl_hhbm_init(struct qtnf_pcie_pearl_state *ps)
{
	u32 val;

	val = readl(PCIE_HHBM_CONFIG(ps->pcie_reg_base));
	val |= HHBM_CONFIG_SOFT_RESET;
	writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
	usleep_range(50, 100);
	val &= ~HHBM_CONFIG_SOFT_RESET;
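	/*
	 * Write the configuration back with the soft reset released; on
	 * builds with 64-bit DMA addressing also switch HHBM to 64-bit
	 * host buffer pointers before programming the queue limit below.
	 */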
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	val |= HHBM_64BIT;
#endif
	writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
	writel(ps->base.rx_bd_num, PCIE_HHBM_Q_LIMIT_REG(ps->pcie_reg_base));

	return 0;
}

static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps,
				     unsigned int tx_bd_size)
{
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	int ret;
	u32 val;

	if (tx_bd_size == 0)
		tx_bd_size = PEARL_TX_BD_SIZE_DEFAULT;

	val = tx_bd_size * sizeof(struct qtnf_pearl_tx_bd);

	if (!is_power_of_2(tx_bd_size) || val > PCIE_HHBM_MAX_SIZE) {
		pr_warn("bad tx_bd_size value %u\n", tx_bd_size);
		priv->tx_bd_num = PEARL_TX_BD_SIZE_DEFAULT;
	} else {
		priv->tx_bd_num = tx_bd_size;
	}

	priv->rx_bd_w_index = 0;
	priv->rx_bd_r_index = 0;

	if (!priv->rx_bd_num || !is_power_of_2(priv->rx_bd_num)) {
		pr_err("rx_bd_size_param %u is not power of two\n",
		       priv->rx_bd_num);
		return -EINVAL;
	}

	val = priv->rx_bd_num * sizeof(dma_addr_t);
	if (val > PCIE_HHBM_MAX_SIZE) {
		pr_err("rx_bd_size_param %u is too large\n",
		       priv->rx_bd_num);
		return -EINVAL;
	}

	ret = pearl_hhbm_init(ps);
	if (ret) {
		pr_err("failed to init h/w queues\n");
		return ret;
	}

	ret = qtnf_pcie_alloc_skb_array(priv);
	if (ret) {
		pr_err("failed to allocate skb array\n");
		return ret;
	}

	ret = pearl_alloc_bd_table(ps);
	if (ret) {
		pr_err("failed to allocate bd table\n");
		return ret;
	}

	ret = pearl_alloc_rx_buffers(ps);
	if (ret) {
		pr_err("failed to allocate rx buffers\n");
		return ret;
	}

	return ret;
}

static void qtnf_pearl_data_tx_reclaim(struct qtnf_pcie_pearl_state *ps)
{
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	struct qtnf_pearl_tx_bd *txbd;
	struct sk_buff *skb;
	unsigned long flags;
	dma_addr_t paddr;
	u32 tx_done_index;
	int count = 0;
	int i;

	spin_lock_irqsave(&priv->tx_reclaim_lock, flags);

	tx_done_index = readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base))
			& (priv->tx_bd_num - 1);

	i = priv->tx_bd_r_index;

	while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) {
		skb = priv->tx_skb[i];
		if (likely(skb)) {
			txbd = &ps->tx_bd_vbase[i];
			paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
					      le32_to_cpu(txbd->addr));
			pci_unmap_single(priv->pdev, paddr, skb->len,
					 PCI_DMA_TODEVICE);

			if (skb->dev) {
				qtnf_update_tx_stats(skb->dev, skb);
				if (unlikely(priv->tx_stopped)) {
					qtnf_wake_all_queues(skb->dev);
					priv->tx_stopped = 0;
				}
			}

			dev_kfree_skb_any(skb);
		}

		priv->tx_skb[i] = NULL;
		count++;

		if (++i >= priv->tx_bd_num)
			i = 0;
	}

	priv->tx_reclaim_done += count;
	priv->tx_reclaim_req++;
	priv->tx_bd_r_index = i;

	spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags);
}

static int qtnf_tx_queue_ready(struct qtnf_pcie_pearl_state *ps)
{
	struct qtnf_pcie_bus_priv *priv = &ps->base;

	if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
			priv->tx_bd_num)) {
		qtnf_pearl_data_tx_reclaim(ps);

		if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
				priv->tx_bd_num)) {
			pr_warn_ratelimited("reclaim full Tx queue\n");
			priv->tx_full_count++;
			return 0;
		}
	}

	return 1;
}

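/*
 * Transmit an skb over the PCIe data path: map it for DMA, fill the next
 * TX buffer descriptor and notify the endpoint by writing the descriptor
 * address to the HDP host write-descriptor registers.
 */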
static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
{
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	dma_addr_t txbd_paddr, skb_paddr;
	struct qtnf_pearl_tx_bd *txbd;
	unsigned long flags;
	int len, i;
	u32 info;
	int ret = 0;

	spin_lock_irqsave(&priv->tx_lock, flags);

	if (!qtnf_tx_queue_ready(ps)) {
		if (skb->dev) {
			netif_tx_stop_all_queues(skb->dev);
			priv->tx_stopped = 1;
		}

		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	i = priv->tx_bd_w_index;
	priv->tx_skb[i] = skb;
	len = skb->len;

	skb_paddr = pci_map_single(priv->pdev, skb->data,
				   skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(priv->pdev, skb_paddr)) {
		pr_err("skb DMA mapping error: %pad\n", &skb_paddr);
		ret = -ENOMEM;
		goto tx_done;
	}

	txbd = &ps->tx_bd_vbase[i];
	txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr));
	txbd->addr_h = cpu_to_le32(QTN_HOST_HI32(skb_paddr));

	info = (len & QTN_PCIE_TX_DESC_LEN_MASK) << QTN_PCIE_TX_DESC_LEN_SHIFT;
	txbd->info = cpu_to_le32(info);

	/* sync up all descriptor updates before passing them to EP */
	dma_wmb();

	/* write new TX descriptor to PCIE_RX_FIFO on EP */
	txbd_paddr = ps->tx_bd_pbase + i * sizeof(struct qtnf_pearl_tx_bd);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	writel(QTN_HOST_HI32(txbd_paddr),
	       PCIE_HDP_HOST_WR_DESC0_H(ps->pcie_reg_base));
#endif
	writel(QTN_HOST_LO32(txbd_paddr),
	       PCIE_HDP_HOST_WR_DESC0(ps->pcie_reg_base));

	if (++i >= priv->tx_bd_num)
		i = 0;

	priv->tx_bd_w_index = i;

tx_done:
	if (ret && skb) {
		pr_err_ratelimited("drop skb\n");
		if (skb->dev)
			skb->dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	priv->tx_done_count++;
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	qtnf_pearl_data_tx_reclaim(ps);

	return NETDEV_TX_OK;
}

static irqreturn_t qtnf_pcie_pearl_interrupt(int irq, void *data)
{
	struct qtnf_bus *bus = (struct qtnf_bus *)data;
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	u32 status;

	priv->pcie_irq_count++;
	status = readl(PCIE_HDP_INT_STATUS(ps->pcie_reg_base));

	qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
	qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out);

	if (!(status & ps->pcie_irq_mask))
		goto irq_done;

	if (status & PCIE_HDP_INT_RX_BITS)
		ps->pcie_irq_rx_count++;

	if (status & PCIE_HDP_INT_TX_BITS)
		ps->pcie_irq_tx_count++;

	if (status & PCIE_HDP_INT_HHBM_UF)
		ps->pcie_irq_uf_count++;

	if (status & PCIE_HDP_INT_RX_BITS) {
		qtnf_dis_rxdone_irq(ps);
		napi_schedule(&bus->mux_napi);
	}

	if (status & PCIE_HDP_INT_TX_BITS) {
		qtnf_dis_txdone_irq(ps);
		tasklet_hi_schedule(&priv->reclaim_tq);
	}

irq_done:
	/* H/W workaround: clean all bits, not only enabled */
	qtnf_non_posted_write(~0U, PCIE_HDP_INT_STATUS(ps->pcie_reg_base));

	if (!priv->msi_enabled)
		qtnf_deassert_intx(ps);

	return IRQ_HANDLED;
}

static int qtnf_rx_data_ready(struct qtnf_pcie_pearl_state *ps)
{
	u16 index = ps->base.rx_bd_r_index;
	struct qtnf_pearl_rx_bd *rxbd;
	u32 descw;

	rxbd = &ps->rx_bd_vbase[index];
	descw = le32_to_cpu(rxbd->info);

	if (descw & QTN_TXDONE_MASK)
		return 1;

	return 0;
}

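/*
 * NAPI poll handler: consume up to @budget completed RX descriptors,
 * hand the payloads to the qtnfmac core and refill the RX ring with
 * freshly mapped skbs.
 */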
static int qtnf_pcie_pearl_rx_poll(struct napi_struct *napi, int budget)
{
	struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi);
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	struct net_device *ndev = NULL;
	struct sk_buff *skb = NULL;
	int processed = 0;
	struct qtnf_pearl_rx_bd *rxbd;
	dma_addr_t skb_paddr;
	int consume;
	u32 descw;
	u32 psize;
	u16 r_idx;
	u16 w_idx;
	int ret;

	while (processed < budget) {
		if (!qtnf_rx_data_ready(ps))
			goto rx_out;

		r_idx = priv->rx_bd_r_index;
		rxbd = &ps->rx_bd_vbase[r_idx];
		descw = le32_to_cpu(rxbd->info);

		skb = priv->rx_skb[r_idx];
		psize = QTN_GET_LEN(descw);
		consume = 1;

		if (!(descw & QTN_TXDONE_MASK)) {
			pr_warn("skip invalid rxbd[%d]\n", r_idx);
			consume = 0;
		}

		if (!skb) {
			pr_warn("skip missing rx_skb[%d]\n", r_idx);
			consume = 0;
		}

		if (skb && (skb_tailroom(skb) < psize)) {
			pr_err("skip packet with invalid length: %u > %u\n",
			       psize, skb_tailroom(skb));
			consume = 0;
		}

		if (skb) {
			skb_paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
						  le32_to_cpu(rxbd->addr));
			pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE,
					 PCI_DMA_FROMDEVICE);
		}

		if (consume) {
			skb_put(skb, psize);
			ndev = qtnf_classify_skb(bus, skb);
			if (likely(ndev)) {
				qtnf_update_rx_stats(ndev, skb);
				skb->protocol = eth_type_trans(skb, ndev);
				napi_gro_receive(napi, skb);
			} else {
				pr_debug("drop untagged skb\n");
				bus->mux_dev.stats.rx_dropped++;
				dev_kfree_skb_any(skb);
			}
		} else {
			if (skb) {
				bus->mux_dev.stats.rx_dropped++;
				dev_kfree_skb_any(skb);
			}
		}

		priv->rx_skb[r_idx] = NULL;
		if (++r_idx >= priv->rx_bd_num)
			r_idx = 0;

		priv->rx_bd_r_index = r_idx;

		/* replace processed buffer by a new one */
		w_idx = priv->rx_bd_w_index;
		while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
				  priv->rx_bd_num) > 0) {
			if (++w_idx >= priv->rx_bd_num)
				w_idx = 0;

			ret = pearl_skb2rbd_attach(ps, w_idx);
			if (ret) {
				pr_err("failed to allocate new rx_skb[%d]\n",
				       w_idx);
				break;
			}
		}

		processed++;
	}

rx_out:
	if (processed < budget) {
		napi_complete(napi);
		qtnf_en_rxdone_irq(ps);
	}

	return processed;
}

static void
qtnf_pcie_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev)
{
	struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);

	tasklet_hi_schedule(&ps->base.reclaim_tq);
}

static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus)
{
	struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);

	qtnf_enable_hdp_irqs(ps);
	napi_enable(&bus->mux_napi);
}

static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
{
	struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);

	napi_disable(&bus->mux_napi);
	qtnf_disable_hdp_irqs(ps);
}

static const struct qtnf_bus_ops qtnf_pcie_pearl_bus_ops = {
	/* control path methods */
	.control_tx = qtnf_pcie_control_tx,

	/* data path methods */
	.data_tx = qtnf_pcie_data_tx,
	.data_tx_timeout = qtnf_pcie_data_tx_timeout,
	.data_rx_start = qtnf_pcie_data_rx_start,
	.data_rx_stop = qtnf_pcie_data_rx_stop,
};

static int qtnf_dbg_irq_stats(struct seq_file *s, void *data)
{
	struct qtnf_bus *bus = dev_get_drvdata(s->private);
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
	u32 reg = readl(PCIE_HDP_INT_EN(ps->pcie_reg_base));
	u32 status;

	seq_printf(s, "pcie_irq_count(%u)\n", ps->base.pcie_irq_count);
	seq_printf(s, "pcie_irq_tx_count(%u)\n", ps->pcie_irq_tx_count);
	status = reg & PCIE_HDP_INT_TX_BITS;
	seq_printf(s, "pcie_irq_tx_status(%s)\n",
		   (status == PCIE_HDP_INT_TX_BITS) ? "EN" : "DIS");
	seq_printf(s, "pcie_irq_rx_count(%u)\n", ps->pcie_irq_rx_count);
	status = reg & PCIE_HDP_INT_RX_BITS;
	seq_printf(s, "pcie_irq_rx_status(%s)\n",
		   (status == PCIE_HDP_INT_RX_BITS) ? "EN" : "DIS");
	seq_printf(s, "pcie_irq_uf_count(%u)\n", ps->pcie_irq_uf_count);
	status = reg & PCIE_HDP_INT_HHBM_UF;
	seq_printf(s, "pcie_irq_hhbm_uf_status(%s)\n",
		   (status == PCIE_HDP_INT_HHBM_UF) ? "EN" : "DIS");

	return 0;
}

static int qtnf_dbg_hdp_stats(struct seq_file *s, void *data)
{
	struct qtnf_bus *bus = dev_get_drvdata(s->private);
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ps->base;

	seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count);
	seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
	seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
	seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);

	seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
	seq_printf(s, "tx_bd_p_index(%u)\n",
		   readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base))
		   & (priv->tx_bd_num - 1));
	seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index);
	seq_printf(s, "tx queue len(%u)\n",
		   CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index,
			    priv->tx_bd_num));

	seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index);
	seq_printf(s, "rx_bd_p_index(%u)\n",
		   readl(PCIE_HDP_TX0DMA_CNT(ps->pcie_reg_base))
		   & (priv->rx_bd_num - 1));
	seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index);
	seq_printf(s, "rx alloc queue len(%u)\n",
		   CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
			      priv->rx_bd_num));

	return 0;
}

static int qtnf_ep_fw_send(struct pci_dev *pdev, uint32_t size,
			   int blk, const u8 *pblk, const u8 *fw)
{
	struct qtnf_bus *bus = pci_get_drvdata(pdev);

	struct qtnf_pearl_fw_hdr *hdr;
	u8 *pdata;

	int hds = sizeof(*hdr);
	struct sk_buff *skb = NULL;
	int len = 0;
	int ret;

	skb = __dev_alloc_skb(QTN_PCIE_FW_BUFSZ, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	skb->len = QTN_PCIE_FW_BUFSZ;
	skb->dev = NULL;

	hdr = (struct qtnf_pearl_fw_hdr *)skb->data;
	memcpy(hdr->boardflg, QTN_PCIE_BOARDFLG, strlen(QTN_PCIE_BOARDFLG));
	hdr->fwsize = cpu_to_le32(size);
	hdr->seqnum = cpu_to_le32(blk);

	if (blk)
		hdr->type = cpu_to_le32(QTN_FW_DSUB);
	else
		hdr->type = cpu_to_le32(QTN_FW_DBEGIN);

	pdata = skb->data + hds;

	len = QTN_PCIE_FW_BUFSZ - hds;
	if (pblk >= (fw + size - len)) {
		len = fw + size - pblk;
		hdr->type = cpu_to_le32(QTN_FW_DEND);
	}

	hdr->pktlen = cpu_to_le32(len);
	memcpy(pdata, pblk, len);
	hdr->crc = cpu_to_le32(~crc32(0, pdata, len));

	ret = qtnf_pcie_data_tx(bus, skb);

	return (ret == NETDEV_TX_OK) ? len : 0;
}
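
/*
 * Upload the firmware image to the endpoint in QTN_PCIE_FW_BUFSZ sized
 * chunks, re-synchronizing with the EP every QTN_PCIE_FW_DLMASK blocks
 * and rewinding the stream if the EP requests a retry.
 */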
static int
qtnf_ep_fw_load(struct qtnf_pcie_pearl_state *ps, const u8 *fw, u32 fw_size)
{
	int blk_size = QTN_PCIE_FW_BUFSZ - sizeof(struct qtnf_pearl_fw_hdr);
	int blk_count = fw_size / blk_size + ((fw_size % blk_size) ? 1 : 0);
	const u8 *pblk = fw;
	int threshold = 0;
	int blk = 0;
	int len;

	pr_debug("FW upload started: fw_addr=0x%p size=%d\n", fw, fw_size);

	while (blk < blk_count) {
		if (++threshold > 10000) {
			pr_err("FW upload failed: too many retries\n");
			return -ETIMEDOUT;
		}

		len = qtnf_ep_fw_send(ps->base.pdev, fw_size, blk, pblk, fw);
		if (len <= 0)
			continue;

		if (!((blk + 1) & QTN_PCIE_FW_DLMASK) ||
		    (blk == (blk_count - 1))) {
			qtnf_set_state(&ps->bda->bda_rc_state,
				       QTN_RC_FW_SYNC);
			if (qtnf_poll_state(&ps->bda->bda_ep_state,
					    QTN_EP_FW_SYNC,
					    QTN_FW_DL_TIMEOUT_MS)) {
				pr_err("FW upload failed: SYNC timed out\n");
				return -ETIMEDOUT;
			}

			qtnf_clear_state(&ps->bda->bda_ep_state,
					 QTN_EP_FW_SYNC);

			if (qtnf_is_state(&ps->bda->bda_ep_state,
					  QTN_EP_FW_RETRY)) {
				if (blk == (blk_count - 1)) {
					int last_round =
						blk_count & QTN_PCIE_FW_DLMASK;
					blk -= last_round;
					pblk -= ((last_round - 1) *
						blk_size + len);
				} else {
					blk -= QTN_PCIE_FW_DLMASK;
					pblk -= QTN_PCIE_FW_DLMASK * blk_size;
				}

				qtnf_clear_state(&ps->bda->bda_ep_state,
						 QTN_EP_FW_RETRY);

				pr_warn("FW upload retry: block #%d\n", blk);
				continue;
			}

			qtnf_pearl_data_tx_reclaim(ps);
		}

		pblk += len;
		blk++;
	}

	pr_debug("FW upload completed: totally sent %d blocks\n", blk);
	return 0;
}

static void qtnf_pearl_fw_work_handler(struct work_struct *work)
{
	struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
	struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);
	u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK;
	const char *fwname = QTN_PCI_PEARL_FW_NAME;
	struct pci_dev *pdev = ps->base.pdev;
	const struct firmware *fw;
	int ret;

	if (ps->base.flashboot) {
		state |= QTN_RC_FW_FLASHBOOT;
	} else {
		ret = request_firmware(&fw, fwname, &pdev->dev);
		if (ret < 0) {
			pr_err("failed to get firmware %s\n", fwname);
			goto fw_load_exit;
		}
	}

	qtnf_set_state(&ps->bda->bda_rc_state, state);

	if (qtnf_poll_state(&ps->bda->bda_ep_state, QTN_EP_FW_LOADRDY,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("card is not ready\n");

		if (!ps->base.flashboot)
			release_firmware(fw);

		goto fw_load_exit;
	}

	qtnf_clear_state(&ps->bda->bda_ep_state, QTN_EP_FW_LOADRDY);

	if (ps->base.flashboot) {
		pr_info("booting firmware from flash\n");

	} else {
		pr_info("starting firmware upload: %s\n", fwname);

		ret = qtnf_ep_fw_load(ps, fw->data, fw->size);
		release_firmware(fw);
		if (ret) {
			pr_err("firmware upload error\n");
			goto fw_load_exit;
		}
	}

	if (qtnf_poll_state(&ps->bda->bda_ep_state, QTN_EP_FW_DONE,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("firmware bringup timed out\n");
		goto fw_load_exit;
	}

	if (qtnf_poll_state(&ps->bda->bda_ep_state,
			    QTN_EP_FW_QLINK_DONE, QTN_FW_QLINK_TIMEOUT_MS)) {
		pr_err("firmware runtime failure\n");
		goto fw_load_exit;
	}

	pr_info("firmware is up and running\n");

	ret = qtnf_pcie_fw_boot_done(bus);
	if (ret)
		goto fw_load_exit;
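
	/* expose HDP ring state and interrupt counters via debugfs */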
	qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats);
	qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);

fw_load_exit:
	put_device(&pdev->dev);
}

static void qtnf_pearl_reclaim_tasklet_fn(unsigned long data)
{
	struct qtnf_pcie_pearl_state *ps = (void *)data;

	qtnf_pearl_data_tx_reclaim(ps);
	qtnf_en_txdone_irq(ps);
}

static u64 qtnf_pearl_dma_mask_get(void)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	return DMA_BIT_MASK(64);
#else
	return DMA_BIT_MASK(32);
#endif
}

static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size)
{
	struct qtnf_shm_ipc_int ipc_int;
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
	struct pci_dev *pdev = ps->base.pdev;
	int ret;

	bus->bus_ops = &qtnf_pcie_pearl_bus_ops;
	spin_lock_init(&ps->irq_lock);
	INIT_WORK(&bus->fw_work, qtnf_pearl_fw_work_handler);

	ps->pcie_reg_base = ps->base.dmareg_bar;
	ps->bda = ps->base.epmem_bar;
	writel(ps->base.msi_enabled, &ps->bda->bda_rc_msi_enabled);

	ret = qtnf_pcie_pearl_init_xfer(ps, tx_bd_size);
	if (ret) {
		pr_err("PCIE xfer init failed\n");
		return ret;
	}

	/* init default irq settings */
	qtnf_init_hdp_irqs(ps);

	/* start with disabled irqs */
	qtnf_disable_hdp_irqs(ps);

	ret = devm_request_irq(&pdev->dev, pdev->irq,
			       &qtnf_pcie_pearl_interrupt, 0,
			       "qtnf_pearl_irq", (void *)bus);
	if (ret) {
		pr_err("failed to request pcie irq %d\n", pdev->irq);
		qtnf_pearl_free_xfer_buffers(ps);
		return ret;
	}

	tasklet_init(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn,
		     (unsigned long)ps);
	netif_napi_add(&bus->mux_dev, &bus->mux_napi,
		       qtnf_pcie_pearl_rx_poll, 10);

	ipc_int.fn = qtnf_pcie_pearl_ipc_gen_ep_int;
	ipc_int.arg = ps;
	qtnf_pcie_init_shm_ipc(&ps->base, &ps->bda->bda_shm_reg1,
			       &ps->bda->bda_shm_reg2, &ipc_int);

	return 0;
}

static void qtnf_pcie_pearl_remove(struct qtnf_bus *bus)
{
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);

	qtnf_pearl_reset_ep(ps);
	qtnf_pearl_free_xfer_buffers(ps);
}

#ifdef CONFIG_PM_SLEEP
static int qtnf_pcie_pearl_suspend(struct qtnf_bus *bus)
{
	return -EOPNOTSUPP;
}

static int qtnf_pcie_pearl_resume(struct qtnf_bus *bus)
{
	return 0;
}
#endif

struct qtnf_bus *qtnf_pcie_pearl_alloc(struct pci_dev *pdev)
{
	struct qtnf_bus *bus;
	struct qtnf_pcie_pearl_state *ps;

	bus = devm_kzalloc(&pdev->dev, sizeof(*bus) + sizeof(*ps), GFP_KERNEL);
	if (!bus)
		return NULL;

	ps = get_bus_priv(bus);
	ps->base.probe_cb = qtnf_pcie_pearl_probe;
	ps->base.remove_cb = qtnf_pcie_pearl_remove;
	ps->base.dma_mask_get_cb = qtnf_pearl_dma_mask_get;
#ifdef CONFIG_PM_SLEEP
	ps->base.resume_cb = qtnf_pcie_pearl_resume;
	ps->base.suspend_cb = qtnf_pcie_pearl_suspend;
#endif

	return bus;
}