// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/mfd/syscon.h>
#include <linux/debugfs.h>
#include <linux/soc/mediatek/mtk_wed.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include "mtk_eth_soc.h"
#include "mtk_wed_regs.h"
#include "mtk_wed.h"
#include "mtk_ppe.h"
#include "mtk_wed_wo.h"

#define MTK_PCIE_BASE(n)		(0x1a143000 + (n) * 0x2000)

#define MTK_WED_PKT_SIZE		1900
#define MTK_WED_BUF_SIZE		2048
#define MTK_WED_BUF_PER_PAGE		(PAGE_SIZE / MTK_WED_BUF_SIZE)
#define MTK_WED_RX_RING_SIZE		1536

#define MTK_WED_TX_RING_SIZE		2048
#define MTK_WED_WDMA_RING_SIZE		1024
#define MTK_WED_MAX_GROUP_SIZE		0x100
#define MTK_WED_VLD_GROUP_SIZE		0x40
#define MTK_WED_PER_GROUP_PKT		128

#define MTK_WED_FBUF_SIZE		128
#define MTK_WED_MIOD_CNT		16
#define MTK_WED_FB_CMD_CNT		1024
#define MTK_WED_RRO_QUE_CNT		8192
#define MTK_WED_MIOD_ENTRY_CNT		128

static struct mtk_wed_hw *hw_list[2];
static DEFINE_MUTEX(hw_lock);

struct mtk_wed_flow_block_priv {
	struct mtk_wed_hw *hw;
	struct net_device *dev;
};

static void
wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
	regmap_update_bits(dev->hw->regs, reg, mask | val, val);
}

static void
wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	return wed_m32(dev, reg, 0, mask);
}

static void
wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	return wed_m32(dev, reg, mask, 0);
}

static void
wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
	wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
}

static void
wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	wdma_m32(dev, reg, 0, mask);
}

static void
wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	wdma_m32(dev, reg, mask, 0);
}

static u32
wifi_r32(struct mtk_wed_device *dev, u32 reg)
{
	return readl(dev->wlan.base + reg);
}

static void
wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
{
	writel(val, dev->wlan.base + reg);
}

static u32
mtk_wed_read_reset(struct mtk_wed_device *dev)
{
	return wed_r32(dev, MTK_WED_RESET);
}

static u32
mtk_wdma_read_reset(struct mtk_wed_device *dev)
{
	return wdma_r32(dev, MTK_WDMA_GLO_CFG);
}

static int
mtk_wdma_rx_reset(struct mtk_wed_device *dev)
{
	u32 status, mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY;
	int i, ret;

	wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN);
	ret = readx_poll_timeout(mtk_wdma_read_reset, dev, status,
				 !(status & mask), 0, 10000);
	if (ret)
		dev_err(dev->hw->dev, "rx reset failed\n");

	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);

	for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) {
		if (dev->rx_wdma[i].desc)
			continue;

		wdma_w32(dev,
			 MTK_WDMA_RING_RX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
	}

	return ret;
}

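/*
 * Disable the WDMA TX engine, wait for it to drain and clear the hardware
 * CPU ring indices so a later restart begins from a known state.
 */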
static void
mtk_wdma_tx_reset(struct mtk_wed_device *dev)
{
	u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY;
	int i;

	wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
	if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
			       !(status & mask), 0, 10000))
		dev_err(dev->hw->dev, "tx reset failed\n");

	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);

	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
		wdma_w32(dev,
			 MTK_WDMA_RING_TX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
}

static void
mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
{
	u32 status;

	wed_w32(dev, MTK_WED_RESET, mask);
	if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
			       !(status & mask), 0, 1000))
		WARN_ON_ONCE(1);
}

static u32
mtk_wed_wo_read_status(struct mtk_wed_device *dev)
{
	return wed_r32(dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_WO_STATUS);
}

static void
mtk_wed_wo_reset(struct mtk_wed_device *dev)
{
	struct mtk_wed_wo *wo = dev->hw->wed_wo;
	u8 state = MTK_WED_WO_STATE_DISABLE;
	void __iomem *reg;
	u32 val;

	mtk_wdma_tx_reset(dev);
	mtk_wed_reset(dev, MTK_WED_RESET_WED);

	if (mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
				 MTK_WED_WO_CMD_CHANGE_STATE, &state,
				 sizeof(state), false))
		return;

	if (readx_poll_timeout(mtk_wed_wo_read_status, dev, val,
			       val == MTK_WED_WOIF_DISABLE_DONE,
			       100, MTK_WOCPU_TIMEOUT))
		dev_err(dev->hw->dev, "failed to disable wed-wo\n");

	reg = ioremap(MTK_WED_WO_CPU_MCUSYS_RESET_ADDR, 4);

	val = readl(reg);
	switch (dev->hw->index) {
	case 0:
		val |= MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK;
		writel(val, reg);
		val &= ~MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK;
		writel(val, reg);
		break;
	case 1:
		val |= MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK;
		writel(val, reg);
		val &= ~MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK;
		writel(val, reg);
		break;
	default:
		break;
	}
	iounmap(reg);
}

void mtk_wed_fe_reset(void)
{
	int i;

	mutex_lock(&hw_lock);

	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
		struct mtk_wed_hw *hw = hw_list[i];
		struct mtk_wed_device *dev;
		int err;

		if (!hw)
			break;

		dev = hw->wed_dev;
		if (!dev || !dev->wlan.reset)
			continue;

		/* reset callback blocks until WLAN reset is completed */
		err = dev->wlan.reset(dev);
		if (err)
			dev_err(dev->dev, "wlan reset failed: %d\n", err);
	}

	mutex_unlock(&hw_lock);
}

void mtk_wed_fe_reset_complete(void)
{
	int i;

	mutex_lock(&hw_lock);

	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
		struct mtk_wed_hw *hw = hw_list[i];
		struct mtk_wed_device *dev;

		if (!hw)
			break;

		dev = hw->wed_dev;
		if (!dev || !dev->wlan.reset_complete)
			continue;

		dev->wlan.reset_complete(dev);
	}

	mutex_unlock(&hw_lock);
}

static struct mtk_wed_hw *
mtk_wed_assign(struct mtk_wed_device *dev)
{
	struct mtk_wed_hw *hw;
	int i;

	if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
		hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
		if (!hw)
			return NULL;

		if (!hw->wed_dev)
			goto out;

		if (hw->version == 1)
			return NULL;

		/* MT7986 WED devices do not have any pcie slot restrictions */
	}
	/* MT7986 PCIE or AXI */
	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
		hw = hw_list[i];
		if (hw && !hw->wed_dev)
			goto out;
	}

	return NULL;

out:
	hw->wed_dev = dev;
	return hw;
}

static int
mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
{
	struct mtk_wdma_desc *desc;
	dma_addr_t desc_phys;
	void **page_list;
	int token = dev->wlan.token_start;
	int ring_size;
	int n_pages;
	int i, page_idx;

	ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
	n_pages = ring_size / MTK_WED_BUF_PER_PAGE;

	page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	dev->tx_buf_ring.size = ring_size;
	dev->tx_buf_ring.pages = page_list;

	desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
				  &desc_phys, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	dev->tx_buf_ring.desc = desc;
	dev->tx_buf_ring.desc_phys = desc_phys;

	for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
		dma_addr_t page_phys, buf_phys;
		struct page *page;
		void *buf;
		int s;

		page = __dev_alloc_pages(GFP_KERNEL, 0);
		if (!page)
			return -ENOMEM;

		page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev->hw->dev, page_phys)) {
			__free_page(page);
			return -ENOMEM;
		}

		page_list[page_idx++] = page;
		dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
					DMA_BIDIRECTIONAL);

		buf = page_to_virt(page);
		buf_phys = page_phys;

		for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
			u32 txd_size;
			u32 ctrl;

			txd_size = dev->wlan.init_buf(buf, buf_phys, token++);

			desc->buf0 = cpu_to_le32(buf_phys);
			desc->buf1 = cpu_to_le32(buf_phys + txd_size);

			if (dev->hw->version == 1)
				ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
				       FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
						  MTK_WED_BUF_SIZE - txd_size) |
				       MTK_WDMA_DESC_CTRL_LAST_SEG1;
			else
				ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
				       FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
						  MTK_WED_BUF_SIZE - txd_size) |
				       MTK_WDMA_DESC_CTRL_LAST_SEG0;
			desc->ctrl = cpu_to_le32(ctrl);
			desc->info = 0;
			desc++;

			buf += MTK_WED_BUF_SIZE;
			buf_phys += MTK_WED_BUF_SIZE;
		}

		dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
					   DMA_BIDIRECTIONAL);
	}

	return 0;
}

static void
mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
{
	struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc;
	void **page_list = dev->tx_buf_ring.pages;
	int page_idx;
	int i;

	if (!page_list)
		return;

	if (!desc)
		goto free_pagelist;

	for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size;
	     i += MTK_WED_BUF_PER_PAGE) {
		void *page = page_list[page_idx++];
		dma_addr_t buf_addr;

		if (!page)
			break;

		buf_addr = le32_to_cpu(desc[i].buf0);
		dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(page);
	}

	dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc),
			  desc, dev->tx_buf_ring.desc_phys);

free_pagelist:
	kfree(page_list);
}

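/*
 * RX buffer management: WED only allocates the RX buffer-manager descriptor
 * ring here; the packet buffers themselves are provided by the WLAN driver
 * through the init_rx_buf()/release_rx_buf() callbacks.
 */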
static int
mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
{
	struct mtk_rxbm_desc *desc;
	dma_addr_t desc_phys;

	dev->rx_buf_ring.size = dev->wlan.rx_nbuf;
	desc = dma_alloc_coherent(dev->hw->dev,
				  dev->wlan.rx_nbuf * sizeof(*desc),
				  &desc_phys, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	dev->rx_buf_ring.desc = desc;
	dev->rx_buf_ring.desc_phys = desc_phys;
	dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt);

	return 0;
}

static void
mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
{
	struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;

	if (!desc)
		return;

	dev->wlan.release_rx_buf(dev);
	dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc),
			  desc, dev->rx_buf_ring.desc_phys);
}

static void
mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev)
{
	wed_w32(dev, MTK_WED_RX_BM_RX_DMAD,
		FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_size));
	wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys);
	wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL |
		FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_npkt));
	wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
		FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
}

static void
mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
{
	if (!ring->desc)
		return;

	dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size,
			  ring->desc, ring->desc_phys);
}

static void
mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
{
	mtk_wed_free_rx_buffer(dev);
	mtk_wed_free_ring(dev, &dev->rro.ring);
}

static void
mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
		mtk_wed_free_ring(dev, &dev->tx_ring[i]);
	for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
		mtk_wed_free_ring(dev, &dev->rx_wdma[i]);
}

static void
mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
{
	u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;

	if (dev->hw->version == 1)
		mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
	else
		mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
			MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
			MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
			MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;

	if (!dev->hw->num_flows)
		mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;

	wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
	wed_r32(dev, MTK_WED_EXT_INT_MASK);
}

static void
mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
{
	if (enable) {
		wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
		wed_w32(dev, MTK_WED_TXP_DW1,
			FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
	} else {
		wed_w32(dev, MTK_WED_TXP_DW1,
			FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
		wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
	}
}

#define MTK_WFMDA_RX_DMA_EN	BIT(2)
static void
mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
{
	u32 val;
	int i;

	if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED))
		return; /* queue is not configured by mt76 */

	for (i = 0; i < 3; i++) {
		u32 cur_idx;

		cur_idx = wed_r32(dev,
				  MTK_WED_WPDMA_RING_RX_DATA(idx) +
				  MTK_WED_RING_OFS_CPU_IDX);
		if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
			break;

		usleep_range(100000, 200000);
	}

	if (i == 3) {
		dev_err(dev->hw->dev, "rx dma enable failed\n");
		return;
	}

	val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) |
	      MTK_WFMDA_RX_DMA_EN;
	wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val);
}

static void
mtk_wed_dma_disable(struct mtk_wed_device *dev)
{
	wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);

	wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);

	wed_clr(dev, MTK_WED_GLO_CFG,
		MTK_WED_GLO_CFG_TX_DMA_EN |
		MTK_WED_GLO_CFG_RX_DMA_EN);

	wdma_clr(dev, MTK_WDMA_GLO_CFG,
		 MTK_WDMA_GLO_CFG_TX_DMA_EN |
		 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
		 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);

	if (dev->hw->version == 1) {
		regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
		wdma_clr(dev, MTK_WDMA_GLO_CFG,
			 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
	} else {
		wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);

		wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
			MTK_WED_WPDMA_RX_D_RX_DRV_EN);
		wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
	}

	mtk_wed_set_512_support(dev, false);
}

static void
mtk_wed_stop(struct mtk_wed_device *dev)
{
	mtk_wed_set_ext_int(dev, false);

	wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
	wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
	wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);

	if (dev->hw->version == 1)
		return;

	wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
	wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
}

static void
mtk_wed_deinit(struct mtk_wed_device *dev)
{
	mtk_wed_stop(dev);
	mtk_wed_dma_disable(dev);

	wed_clr(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WED_TX_BM_EN |
		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

	if (dev->hw->version == 1)
		return;

	wed_clr(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_RX_ROUTE_QM_EN |
		MTK_WED_CTRL_WED_RX_BM_EN |
		MTK_WED_CTRL_RX_RRO_QM_EN);
}

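/*
 * Tear-down path shared by mtk_wed_detach() and the attach error path:
 * stop and reset the WED/WDMA blocks, release TX/RX resources and hand the
 * DMA device back to the ethernet driver when no WED instance is left in
 * use. Called with hw_lock held.
 */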
static void
__mtk_wed_detach(struct mtk_wed_device *dev)
{
	struct mtk_wed_hw *hw = dev->hw;

	mtk_wed_deinit(dev);

	mtk_wdma_rx_reset(dev);
	mtk_wed_reset(dev, MTK_WED_RESET_WED);
	mtk_wed_free_tx_buffer(dev);
	mtk_wed_free_tx_rings(dev);

	if (mtk_wed_get_rx_capa(dev)) {
		if (hw->wed_wo)
			mtk_wed_wo_reset(dev);
		mtk_wed_free_rx_rings(dev);
		if (hw->wed_wo)
			mtk_wed_wo_deinit(hw);
	}

	if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
		struct device_node *wlan_node;

		wlan_node = dev->wlan.pci_dev->dev.of_node;
		if (of_dma_is_coherent(wlan_node) && hw->hifsys)
			regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
					   BIT(hw->index), BIT(hw->index));
	}

	if ((!hw_list[!hw->index] || !hw_list[!hw->index]->wed_dev) &&
	    hw->eth->dma_dev != hw->eth->dev)
		mtk_eth_set_dma_device(hw->eth, hw->eth->dev);

	memset(dev, 0, sizeof(*dev));
	module_put(THIS_MODULE);

	hw->wed_dev = NULL;
}

static void
mtk_wed_detach(struct mtk_wed_device *dev)
{
	mutex_lock(&hw_lock);
	__mtk_wed_detach(dev);
	mutex_unlock(&hw_lock);
}

#define PCIE_BASE_ADDR0		0x11280000
static void
mtk_wed_bus_init(struct mtk_wed_device *dev)
{
	switch (dev->wlan.bus_type) {
	case MTK_WED_BUS_PCIE: {
		struct device_node *np = dev->hw->eth->dev->of_node;
		struct regmap *regs;

		regs = syscon_regmap_lookup_by_phandle(np,
						       "mediatek,wed-pcie");
		if (IS_ERR(regs))
			break;

		regmap_update_bits(regs, 0, BIT(0), BIT(0));

		wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
			FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));

		/* pcie interrupt control: pola/source selection */
		wed_set(dev, MTK_WED_PCIE_INT_CTRL,
			MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
			FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
		wed_r32(dev, MTK_WED_PCIE_INT_CTRL);

		wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
		wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);

		/* pcie interrupt status trigger register */
		wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
		wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);

		/* pola setting */
		wed_set(dev, MTK_WED_PCIE_INT_CTRL,
			MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
		break;
	}
	case MTK_WED_BUS_AXI:
		wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
			MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
		break;
	default:
		break;
	}
}

static void
mtk_wed_set_wpdma(struct mtk_wed_device *dev)
{
	if (dev->hw->version == 1) {
		wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
	} else {
		mtk_wed_bus_init(dev);

		wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
		wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
		wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
		wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
		wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
		wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
	}
}

static void
mtk_wed_hw_init_early(struct mtk_wed_device *dev)
{
	u32 mask, set;

	mtk_wed_deinit(dev);
	mtk_wed_reset(dev, MTK_WED_RESET_WED);
	mtk_wed_set_wpdma(dev);

	mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
	       MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
	       MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
	set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
	      MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
	      MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
	wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);

	if (dev->hw->version == 1) {
		u32 offset = dev->hw->index ? 0x04000400 : 0;

		wdma_set(dev, MTK_WDMA_GLO_CFG,
			 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
			 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
			 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);

		wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
		wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
		wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
			MTK_PCIE_BASE(dev->hw->index));
	} else {
		wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy);
		wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT);
		wed_w32(dev, MTK_WED_WDMA_OFFSET0,
			FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS,
				   MTK_WDMA_INT_STATUS) |
			FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG,
				   MTK_WDMA_GLO_CFG));

		wed_w32(dev, MTK_WED_WDMA_OFFSET1,
			FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL,
				   MTK_WDMA_RING_TX(0)) |
			FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
				   MTK_WDMA_RING_RX(0)));
	}
}

static int
mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
		       int size)
{
	ring->desc = dma_alloc_coherent(dev->hw->dev,
					size * sizeof(*ring->desc),
					&ring->desc_phys, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_size = sizeof(*ring->desc);
	ring->size = size;

	return 0;
}

#define MTK_WED_MIOD_COUNT	(MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT)
static int
mtk_wed_rro_alloc(struct mtk_wed_device *dev)
{
	struct reserved_mem *rmem;
	struct device_node *np;
	int index;

	index = of_property_match_string(dev->hw->node, "memory-region-names",
					 "wo-dlm");
	if (index < 0)
		return index;

	np = of_parse_phandle(dev->hw->node, "memory-region", index);
	if (!np)
		return -ENODEV;

	rmem = of_reserved_mem_lookup(np);
	of_node_put(np);

	if (!rmem)
		return -ENODEV;

	dev->rro.miod_phys = rmem->base;
	dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys;

	return mtk_wed_rro_ring_alloc(dev, &dev->rro.ring,
				      MTK_WED_RRO_QUE_CNT);
}

static int
mtk_wed_rro_cfg(struct mtk_wed_device *dev)
{
	struct mtk_wed_wo *wo = dev->hw->wed_wo;
	struct {
		struct {
			__le32 base;
			__le32 cnt;
			__le32 unit;
		} ring[2];
		__le32 wed;
		u8 version;
	} req = {
		.ring[0] = {
			.base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE),
			.cnt = cpu_to_le32(MTK_WED_MIOD_CNT),
			.unit = cpu_to_le32(MTK_WED_MIOD_ENTRY_CNT),
		},
		.ring[1] = {
			.base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE +
					    MTK_WED_MIOD_COUNT),
			.cnt = cpu_to_le32(MTK_WED_FB_CMD_CNT),
			.unit = cpu_to_le32(4),
		},
	};

	return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
				    MTK_WED_WO_CMD_WED_CFG,
				    &req, sizeof(req), true);
}

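/*
 * Program the RRO (receive reordering) queue manager: the MIOD and feedback
 * rings live in the "wo-dlm" reserved memory region looked up in
 * mtk_wed_rro_alloc(), while the RRO queue ring itself was allocated from
 * coherent DMA memory.
 */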
static void
mtk_wed_rro_hw_init(struct mtk_wed_device *dev)
{
	wed_w32(dev, MTK_WED_RROQM_MIOD_CFG,
		FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) |
		FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) |
		FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW,
			   MTK_WED_MIOD_ENTRY_CNT >> 2));

	wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_phys);
	wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1,
		FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT));
	wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_phys);
	wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1,
		FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT));
	wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0);
	wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.ring.desc_phys);

	wed_set(dev, MTK_WED_RROQM_RST_IDX,
		MTK_WED_RROQM_RST_IDX_MIOD |
		MTK_WED_RROQM_RST_IDX_FDBK);

	wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
	wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT - 1);
	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
}

static void
mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
{
	wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM);

	for (;;) {
		usleep_range(100, 200);
		if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM))
			break;
	}

	/* configure RX_ROUTE_QM */
	wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
	wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
	wed_set(dev, MTK_WED_RTQM_GLO_CFG,
		FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
	wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
	/* enable RX_ROUTE_QM */
	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
}

static void
mtk_wed_hw_init(struct mtk_wed_device *dev)
{
	if (dev->init_done)
		return;

	dev->init_done = true;
	mtk_wed_set_ext_int(dev, false);
	wed_w32(dev, MTK_WED_TX_BM_CTRL,
		MTK_WED_TX_BM_CTRL_PAUSE |
		FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
			   dev->tx_buf_ring.size / 128) |
		FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
			   MTK_WED_TX_RING_SIZE / 256));

	wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);

	wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);

	if (dev->hw->version == 1) {
		wed_w32(dev, MTK_WED_TX_BM_TKID,
			FIELD_PREP(MTK_WED_TX_BM_TKID_START,
				   dev->wlan.token_start) |
			FIELD_PREP(MTK_WED_TX_BM_TKID_END,
				   dev->wlan.token_start +
				   dev->wlan.nbuf - 1));
		wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
			FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
			MTK_WED_TX_BM_DYN_THR_HI);
	} else {
		wed_w32(dev, MTK_WED_TX_BM_TKID_V2,
			FIELD_PREP(MTK_WED_TX_BM_TKID_START,
				   dev->wlan.token_start) |
			FIELD_PREP(MTK_WED_TX_BM_TKID_END,
				   dev->wlan.token_start +
				   dev->wlan.nbuf - 1));
		wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
			FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
			MTK_WED_TX_BM_DYN_THR_HI_V2);
		wed_w32(dev, MTK_WED_TX_TKID_CTRL,
			MTK_WED_TX_TKID_CTRL_PAUSE |
			FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
				   dev->tx_buf_ring.size / 128) |
			FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
				   dev->tx_buf_ring.size / 128));
		wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
			FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
			MTK_WED_TX_TKID_DYN_THR_HI);
	}

	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);

	if (dev->hw->version == 1) {
		wed_set(dev, MTK_WED_CTRL,
			MTK_WED_CTRL_WED_TX_BM_EN |
			MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
	} else {
		wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
		/* rx hw init */
		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
			MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
			MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);

		mtk_wed_rx_buffer_hw_init(dev);
		mtk_wed_rro_hw_init(dev);
		mtk_wed_route_qm_hw_init(dev);
	}

	wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
}

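/*
 * Reinitialize all descriptors of a ring: TX descriptors get their DMA-done
 * bit set, RX descriptors get the to-host flag; buffer pointers and info
 * words are cleared.
 */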
static void
mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx)
{
	void *head = (void *)ring->desc;
	int i;

	for (i = 0; i < size; i++) {
		struct mtk_wdma_desc *desc;

		desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size);
		desc->buf0 = 0;
		if (tx)
			desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
		else
			desc->ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST);
		desc->buf1 = 0;
		desc->info = 0;
	}
}

static u32
mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	return !!(wed_r32(dev, reg) & mask);
}

static int
mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	int sleep = 15000;
	int timeout = 100 * sleep;
	u32 val;

	return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
				 timeout, false, dev, reg, mask);
}

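/*
 * RX reset sequence: put the WO firmware into SER reset state, drain and
 * reset the WPDMA RX driver, the RRO and route queue managers, the WDMA TX
 * path and the RX buffer manager, then switch the WO firmware back to the
 * enabled state and reinitialize the RX rings.
 */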
static int
mtk_wed_rx_reset(struct mtk_wed_device *dev)
{
	struct mtk_wed_wo *wo = dev->hw->wed_wo;
	u8 val = MTK_WED_WO_STATE_SER_RESET;
	int i, ret;

	ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
				   MTK_WED_WO_CMD_CHANGE_STATE, &val,
				   sizeof(val), true);
	if (ret)
		return ret;

	wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
	ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
				MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
	if (ret) {
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
	} else {
		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
			MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
			MTK_WED_WPDMA_RX_D_RST_DRV_IDX);

		wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
			MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
			MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
		wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
			MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
			MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);

		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
	}

	/* reset rro qm */
	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
	ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
				MTK_WED_CTRL_RX_RRO_QM_BUSY);
	if (ret) {
		mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM);
	} else {
		wed_set(dev, MTK_WED_RROQM_RST_IDX,
			MTK_WED_RROQM_RST_IDX_MIOD |
			MTK_WED_RROQM_RST_IDX_FDBK);
		wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
	}

	/* reset route qm */
	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
	ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
				MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
	if (ret)
		mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
	else
		wed_set(dev, MTK_WED_RTQM_GLO_CFG,
			MTK_WED_RTQM_Q_RST);

	/* reset tx wdma */
	mtk_wdma_tx_reset(dev);

	/* reset tx wdma drv */
	wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
	mtk_wed_poll_busy(dev, MTK_WED_CTRL,
			  MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
	mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);

	/* reset wed rx dma */
	ret = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
				MTK_WED_GLO_CFG_RX_DMA_BUSY);
	wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN);
	if (ret) {
		mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
	} else {
		struct mtk_eth *eth = dev->hw->eth;

		if (mtk_is_netsys_v2_or_greater(eth))
			wed_set(dev, MTK_WED_RESET_IDX,
				MTK_WED_RESET_IDX_RX_V2);
		else
			wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX);
		wed_w32(dev, MTK_WED_RESET_IDX, 0);
	}

	/* reset rx bm */
	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
	mtk_wed_poll_busy(dev, MTK_WED_CTRL,
			  MTK_WED_CTRL_WED_RX_BM_BUSY);
	mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);

	/* wo change to enable state */
	val = MTK_WED_WO_STATE_ENABLE;
	ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
				   MTK_WED_WO_CMD_CHANGE_STATE, &val,
				   sizeof(val), true);
	if (ret)
		return ret;

	/* wed_rx_ring_reset */
	for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) {
		if (!dev->rx_ring[i].desc)
			continue;

		mtk_wed_ring_reset(&dev->rx_ring[i], MTK_WED_RX_RING_SIZE,
				   false);
	}
	mtk_wed_free_rx_buffer(dev);

	return 0;
}

static void
mtk_wed_reset_dma(struct mtk_wed_device *dev)
{
	bool busy = false;
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
		if (!dev->tx_ring[i].desc)
			continue;

		mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE,
				   true);
	}

	/* 1. reset WED tx DMA */
	wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN);
	busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
				 MTK_WED_GLO_CFG_TX_DMA_BUSY);
	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
	} else {
		wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX);
		wed_w32(dev, MTK_WED_RESET_IDX, 0);
	}

	/* 2. reset WDMA rx DMA */
	busy = !!mtk_wdma_rx_reset(dev);
	wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
	if (!busy)
		busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
					 MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY);

	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
	} else {
		wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
			MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
		wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);

		wed_set(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);

		wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
	}

	/* 3. reset WED WPDMA tx */
	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

	for (i = 0; i < 100; i++) {
		val = wed_r32(dev, MTK_WED_TX_BM_INTF);
		if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
			break;
	}

	mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN);
	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);

	/* 4. reset WED WPDMA tx */
	busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
				 MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY);
	wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
	if (!busy)
		busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
					 MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY);

	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
	} else {
		wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
			MTK_WED_WPDMA_RESET_IDX_TX |
			MTK_WED_WPDMA_RESET_IDX_RX);
		wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
	}

	dev->init_done = false;
	if (dev->hw->version == 1)
		return;

	if (!busy) {
		wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_WPDMA_IDX_RX);
		wed_w32(dev, MTK_WED_RESET_IDX, 0);
	}

	mtk_wed_rx_reset(dev);
}

static int
mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
		   int size, u32 desc_size, bool tx)
{
	ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size,
					&ring->desc_phys, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_size = desc_size;
	ring->size = size;
	mtk_wed_ring_reset(ring, size, tx);

	return 0;
}

static int
mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
			   bool reset)
{
	u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
	struct mtk_wed_ring *wdma;

	if (idx >= ARRAY_SIZE(dev->rx_wdma))
		return -EINVAL;

	wdma = &dev->rx_wdma[idx];
	if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
					 desc_size, true))
		return -ENOMEM;

	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
		 wdma->desc_phys);
	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
		 size);
	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);

	wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
		wdma->desc_phys);
	wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
		size);

	return 0;
}

static int
mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
			   bool reset)
{
	u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
	struct mtk_wed_ring *wdma;

	if (idx >= ARRAY_SIZE(dev->tx_wdma))
		return -EINVAL;

	wdma = &dev->tx_wdma[idx];
	if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
					 desc_size, true))
		return -ENOMEM;

	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
		 wdma->desc_phys);
	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
		 size);
	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);

	if (reset)
		mtk_wed_ring_reset(wdma, MTK_WED_WDMA_RING_SIZE, true);

	if (!idx) {
		wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE,
			wdma->desc_phys);
		wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_COUNT,
			size);
		wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_CPU_IDX,
			0);
		wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_DMA_IDX,
			0);
	}

	return 0;
}

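/*
 * Called for packets the hardware forwarded to the host instead of
 * offloading: if the PPE flagged the packet with HIT_UNBIND_RATE_REACHED,
 * feed it back to mtk_ppe_check_skb() so the corresponding flow entry can
 * be bound for offload.
 */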
static void
mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
		  u32 reason, u32 hash)
{
	struct mtk_eth *eth = dev->hw->eth;
	struct ethhdr *eh;

	if (!skb)
		return;

	if (reason != MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
		return;

	skb_set_mac_header(skb, 0);
	eh = eth_hdr(skb);
	skb->protocol = eh->h_proto;
	mtk_ppe_check_skb(eth->ppe[dev->hw->index], skb, hash);
}

static void
mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
{
	u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));

	/* wed control cr set */
	wed_set(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WED_TX_BM_EN |
		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

	if (dev->hw->version == 1) {
		wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
			MTK_WED_PCIE_INT_TRIGGER_STATUS);

		wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
			MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
			MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);

		wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
	} else {
		wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
					GENMASK(1, 0));
		/* initial tx interrupt trigger */
		wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
			MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
			MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
			MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN |
			MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG,
				   dev->wlan.tx_tbit[0]) |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
				   dev->wlan.tx_tbit[1]));

		/* initial txfree interrupt trigger */
		wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
			MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
			MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
				   dev->wlan.txfree_tbit));

		wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
			MTK_WED_WPDMA_INT_CTRL_RX0_EN |
			MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
			MTK_WED_WPDMA_INT_CTRL_RX1_EN |
			MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
				   dev->wlan.rx_tbit[0]) |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
				   dev->wlan.rx_tbit[1]));

		wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
		wed_set(dev, MTK_WED_WDMA_INT_CTRL,
			FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
				   dev->wdma_idx));
	}

	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);

	wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
	wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
	wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
}

static void
mtk_wed_dma_enable(struct mtk_wed_device *dev)
{
	wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);

	wed_set(dev, MTK_WED_GLO_CFG,
		MTK_WED_GLO_CFG_TX_DMA_EN |
		MTK_WED_GLO_CFG_RX_DMA_EN);
	wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
	wed_set(dev, MTK_WED_WDMA_GLO_CFG,
		MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);

	wdma_set(dev, MTK_WDMA_GLO_CFG,
		 MTK_WDMA_GLO_CFG_TX_DMA_EN |
		 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
		 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);

	if (dev->hw->version == 1) {
		wdma_set(dev, MTK_WDMA_GLO_CFG,
			 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
	} else {
		int i;

		wed_set(dev, MTK_WED_WPDMA_CTRL,
			MTK_WED_WPDMA_CTRL_SDL1_FIXED);

		wed_set(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
			MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);

		wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);

		wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
			MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
			MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);

		wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
			MTK_WED_WPDMA_RX_D_RX_DRV_EN |
			FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
			FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
				   0x2));

		for (i = 0; i < MTK_WED_RX_QUEUES; i++)
			mtk_wed_check_wfdma_rx_fill(dev, i);
	}
}

static void
mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
{
	int i;

	if (mtk_wed_get_rx_capa(dev) && mtk_wed_rx_buffer_alloc(dev))
		return;

	for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
		if (!dev->rx_wdma[i].desc)
			mtk_wed_wdma_rx_ring_setup(dev, i, 16, false);

	mtk_wed_hw_init(dev);
	mtk_wed_configure_irq(dev, irq_mask);

	mtk_wed_set_ext_int(dev, true);

	if (dev->hw->version == 1) {
		u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
			  FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
				     dev->hw->index);

		val |= BIT(0) | (BIT(1) * !!dev->hw->index);
		regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
	} else {
		/* driver set mid ready and only once */
		wed_w32(dev, MTK_WED_EXT_INT_MASK1,
			MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
		wed_w32(dev, MTK_WED_EXT_INT_MASK2,
			MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);

		wed_r32(dev, MTK_WED_EXT_INT_MASK1);
		wed_r32(dev, MTK_WED_EXT_INT_MASK2);

		if (mtk_wed_rro_cfg(dev))
			return;
	}

	mtk_wed_set_512_support(dev, dev->wlan.wcid_512);

	mtk_wed_dma_enable(dev);
	dev->running = true;
}

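/*
 * Entry point used by the WLAN driver through the mtk_soc_wed_ops RCU
 * pointer: claim a free WED instance, allocate the TX token buffers, run
 * the early hardware setup and, on version 2 hardware, initialize the WO
 * MCU. The RCU read lock held by the caller is released here.
 */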
static int
mtk_wed_attach(struct mtk_wed_device *dev)
	__releases(RCU)
{
	struct mtk_wed_hw *hw;
	struct device *device;
	int ret = 0;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "mtk_wed_attach without holding the RCU read lock");

	if ((dev->wlan.bus_type == MTK_WED_BUS_PCIE &&
	     pci_domain_nr(dev->wlan.pci_dev->bus) > 1) ||
	    !try_module_get(THIS_MODULE))
		ret = -ENODEV;

	rcu_read_unlock();

	if (ret)
		return ret;

	mutex_lock(&hw_lock);

	hw = mtk_wed_assign(dev);
	if (!hw) {
		module_put(THIS_MODULE);
		ret = -ENODEV;
		goto unlock;
	}

	device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
		 ? &dev->wlan.pci_dev->dev
		 : &dev->wlan.platform_dev->dev;
	dev_info(device, "attaching wed device %d version %d\n",
		 hw->index, hw->version);

	dev->hw = hw;
	dev->dev = hw->dev;
	dev->irq = hw->irq;
	dev->wdma_idx = hw->index;
	dev->version = hw->version;

	if (hw->eth->dma_dev == hw->eth->dev &&
	    of_dma_is_coherent(hw->eth->dev->of_node))
		mtk_eth_set_dma_device(hw->eth, hw->dev);

	ret = mtk_wed_tx_buffer_alloc(dev);
	if (ret)
		goto out;

	if (mtk_wed_get_rx_capa(dev)) {
		ret = mtk_wed_rro_alloc(dev);
		if (ret)
			goto out;
	}

	mtk_wed_hw_init_early(dev);
	if (hw->version == 1) {
		regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
				   BIT(hw->index), 0);
	} else {
		dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);
		ret = mtk_wed_wo_init(hw);
	}
out:
	if (ret) {
		dev_err(dev->hw->dev, "failed to attach wed device\n");
		__mtk_wed_detach(dev);
	}
unlock:
	mutex_unlock(&hw_lock);

	return ret;
}

static int
mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
		      bool reset)
{
	struct mtk_wed_ring *ring = &dev->tx_ring[idx];

	/*
	 * Tx ring redirection:
	 * Instead of configuring the WLAN PDMA TX ring directly, the WLAN
	 * driver allocated DMA ring gets configured into WED MTK_WED_RING_TX(n)
	 * registers.
	 *
	 * WED driver posts its own DMA ring as WLAN PDMA TX and configures it
	 * into MTK_WED_WPDMA_RING_TX(n) registers.
	 * It gets filled with packets picked up from WED TX ring and from
	 * WDMA RX.
	 */

	if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring)))
		return -EINVAL;

	if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
					 sizeof(*ring->desc), true))
		return -ENOMEM;

	if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE,
				       reset))
		return -ENOMEM;

	ring->reg_base = MTK_WED_RING_TX(idx);
	ring->wpdma = regs;

	/* WED -> WPDMA */
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);

	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
		ring->desc_phys);
	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
		MTK_WED_TX_RING_SIZE);
	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);

	return 0;
}

static int
mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
{
	struct mtk_wed_ring *ring = &dev->txfree_ring;
	int i, index = dev->hw->version == 1;

	/*
	 * For txfree event handling, the same DMA ring is shared between WED
	 * and WLAN. The WLAN driver accesses the ring index registers through
	 * WED.
	 */
	ring->reg_base = MTK_WED_RING_RX(index);
	ring->wpdma = regs;

	for (i = 0; i < 12; i += 4) {
		u32 val = readl(regs + i);

		wed_w32(dev, MTK_WED_RING_RX(index) + i, val);
		wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val);
	}

	return 0;
}

static int
mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
		      bool reset)
{
	struct mtk_wed_ring *ring = &dev->rx_ring[idx];

	if (WARN_ON(idx >= ARRAY_SIZE(dev->rx_ring)))
		return -EINVAL;

	if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE,
					 sizeof(*ring->desc), false))
		return -ENOMEM;

	if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE,
				       reset))
		return -ENOMEM;

	ring->reg_base = MTK_WED_RING_RX_DATA(idx);
	ring->wpdma = regs;
	ring->flags |= MTK_WED_RING_CONFIGURED;

	/* WPDMA -> WED */
	wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
	wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE);

	wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE,
		ring->desc_phys);
	wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT,
		MTK_WED_RX_RING_SIZE);

	return 0;
}

static u32
mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
{
	u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;

	if (dev->hw->version == 1)
		ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
	else
		ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
			    MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
			    MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
			    MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;

	val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
	wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
	val &= ext_mask;
	if (!dev->hw->num_flows)
		val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
	if (val && net_ratelimit())
		pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);

	val = wed_r32(dev, MTK_WED_INT_STATUS);
	val &= mask;
	wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */

	return val;
}

static void
mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
{
	if (!dev->running)
		return;

	mtk_wed_set_ext_int(dev, !!mask);
	wed_w32(dev, MTK_WED_INT_MASK, mask);
}

int mtk_wed_flow_add(int index)
{
	struct mtk_wed_hw *hw = hw_list[index];
	int ret;

	if (!hw || !hw->wed_dev)
		return -ENODEV;

	if (hw->num_flows) {
		hw->num_flows++;
		return 0;
	}

	mutex_lock(&hw_lock);
	if (!hw->wed_dev) {
		ret = -ENODEV;
		goto out;
	}

	ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
	if (!ret)
		hw->num_flows++;
	mtk_wed_set_ext_int(hw->wed_dev, true);

out:
	mutex_unlock(&hw_lock);

	return ret;
}

void mtk_wed_flow_remove(int index)
{
	struct mtk_wed_hw *hw = hw_list[index];

	if (!hw)
		return;

	if (--hw->num_flows)
		return;

	mutex_lock(&hw_lock);
	if (!hw->wed_dev)
		goto out;

	hw->wed_dev->wlan.offload_disable(hw->wed_dev);
	mtk_wed_set_ext_int(hw->wed_dev, true);

out:
	mutex_unlock(&hw_lock);
}

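/*
 * tc flow-block callback used for offload on the WLAN net_device: only
 * CLSFLOWER requests are handled and they are forwarded to the ethernet
 * driver's mtk_flow_offload_cmd() with this WED instance's index.
 */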
static int
mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			  void *cb_priv)
{
	struct mtk_wed_flow_block_priv *priv = cb_priv;
	struct flow_cls_offload *cls = type_data;
	struct mtk_wed_hw *hw = priv->hw;

	if (!tc_can_offload(priv->dev))
		return -EOPNOTSUPP;

	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	return mtk_flow_offload_cmd(hw->eth, cls, hw->index);
}

static int
mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device *dev,
		       struct flow_block_offload *f)
{
	struct mtk_wed_flow_block_priv *priv;
	static LIST_HEAD(block_cb_list);
	struct flow_block_cb *block_cb;
	struct mtk_eth *eth = hw->eth;
	flow_setup_cb_t *cb;

	if (!eth->soc->offload_version)
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	cb = mtk_wed_setup_tc_block_cb;
	f->driver_block_list = &block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (block_cb) {
			flow_block_cb_incref(block_cb);
			return 0;
		}

		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		priv->hw = hw;
		priv->dev = dev;
		block_cb = flow_block_cb_alloc(cb, dev, priv, NULL);
		if (IS_ERR(block_cb)) {
			kfree(priv);
			return PTR_ERR(block_cb);
		}

		flow_block_cb_incref(block_cb);
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		if (!flow_block_cb_decref(block_cb)) {
			flow_block_cb_remove(block_cb, f);
			list_del(&block_cb->driver_list);
			kfree(block_cb->cb_priv);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int
mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev,
		 enum tc_setup_type type, void *type_data)
{
	struct mtk_wed_hw *hw = wed->hw;

	if (hw->version < 2)
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_BLOCK:
	case TC_SETUP_FT:
		return mtk_wed_setup_tc_block(hw, dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

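/*
 * Register a WED hardware instance described by @np: map its register
 * syscon, publish the ops table through mtk_soc_wed_ops and, on version-1
 * hardware, look up the pcie-mirror and hifsys syscons from the ethernet
 * node.
 */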
void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
		    void __iomem *wdma, phys_addr_t wdma_phy,
		    int index)
{
	static const struct mtk_wed_ops wed_ops = {
		.attach = mtk_wed_attach,
		.tx_ring_setup = mtk_wed_tx_ring_setup,
		.rx_ring_setup = mtk_wed_rx_ring_setup,
		.txfree_ring_setup = mtk_wed_txfree_ring_setup,
		.msg_update = mtk_wed_mcu_msg_update,
		.start = mtk_wed_start,
		.stop = mtk_wed_stop,
		.reset_dma = mtk_wed_reset_dma,
		.reg_read = wed_r32,
		.reg_write = wed_w32,
		.irq_get = mtk_wed_irq_get,
		.irq_set_mask = mtk_wed_irq_set_mask,
		.detach = mtk_wed_detach,
		.ppe_check = mtk_wed_ppe_check,
		.setup_tc = mtk_wed_setup_tc,
	};
	struct device_node *eth_np = eth->dev->of_node;
	struct platform_device *pdev;
	struct mtk_wed_hw *hw;
	struct regmap *regs;
	int irq;

	if (!np)
		return;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		goto err_of_node_put;

	get_device(&pdev->dev);
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		goto err_put_device;

	regs = syscon_regmap_lookup_by_phandle(np, NULL);
	if (IS_ERR(regs))
		goto err_put_device;

	rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);

	mutex_lock(&hw_lock);

	if (WARN_ON(hw_list[index]))
		goto unlock;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		goto unlock;

	hw->node = np;
	hw->regs = regs;
	hw->eth = eth;
	hw->dev = &pdev->dev;
	hw->wdma_phy = wdma_phy;
	hw->wdma = wdma;
	hw->index = index;
	hw->irq = irq;
	hw->version = mtk_is_netsys_v1(eth) ? 1 : 2;

	if (hw->version == 1) {
		hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
				"mediatek,pcie-mirror");
		hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
				"mediatek,hifsys");
		if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
			kfree(hw);
			goto unlock;
		}

		if (!index) {
			regmap_write(hw->mirror, 0, 0);
			regmap_write(hw->mirror, 4, 0);
		}
	}

	mtk_wed_hw_add_debugfs(hw);

	hw_list[index] = hw;

	mutex_unlock(&hw_lock);

	return;

unlock:
	mutex_unlock(&hw_lock);
err_put_device:
	put_device(&pdev->dev);
err_of_node_put:
	of_node_put(np);
}

void mtk_wed_exit(void)
{
	int i;

	rcu_assign_pointer(mtk_soc_wed_ops, NULL);

	synchronize_rcu();

	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
		struct mtk_wed_hw *hw;

		hw = hw_list[i];
		if (!hw)
			continue;

		hw_list[i] = NULL;
		debugfs_remove(hw->debugfs_dir);
		put_device(hw->dev);
		of_node_put(hw->node);
		kfree(hw);
	}
}