1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */ 3 4 #include <linux/kernel.h> 5 #include <linux/slab.h> 6 #include <linux/module.h> 7 #include <linux/bitfield.h> 8 #include <linux/dma-mapping.h> 9 #include <linux/skbuff.h> 10 #include <linux/of_platform.h> 11 #include <linux/of_address.h> 12 #include <linux/of_reserved_mem.h> 13 #include <linux/mfd/syscon.h> 14 #include <linux/debugfs.h> 15 #include <linux/soc/mediatek/mtk_wed.h> 16 #include "mtk_eth_soc.h" 17 #include "mtk_wed_regs.h" 18 #include "mtk_wed.h" 19 #include "mtk_ppe.h" 20 #include "mtk_wed_wo.h" 21 22 #define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000) 23 24 #define MTK_WED_PKT_SIZE 1900 25 #define MTK_WED_BUF_SIZE 2048 26 #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048) 27 #define MTK_WED_RX_RING_SIZE 1536 28 29 #define MTK_WED_TX_RING_SIZE 2048 30 #define MTK_WED_WDMA_RING_SIZE 1024 31 #define MTK_WED_MAX_GROUP_SIZE 0x100 32 #define MTK_WED_VLD_GROUP_SIZE 0x40 33 #define MTK_WED_PER_GROUP_PKT 128 34 35 #define MTK_WED_FBUF_SIZE 128 36 #define MTK_WED_MIOD_CNT 16 37 #define MTK_WED_FB_CMD_CNT 1024 38 #define MTK_WED_RRO_QUE_CNT 8192 39 #define MTK_WED_MIOD_ENTRY_CNT 128 40 41 static struct mtk_wed_hw *hw_list[2]; 42 static DEFINE_MUTEX(hw_lock); 43 44 static void 45 wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) 46 { 47 regmap_update_bits(dev->hw->regs, reg, mask | val, val); 48 } 49 50 static void 51 wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask) 52 { 53 return wed_m32(dev, reg, 0, mask); 54 } 55 56 static void 57 wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) 58 { 59 return wed_m32(dev, reg, mask, 0); 60 } 61 62 static void 63 wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) 64 { 65 wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val); 66 } 67 68 static void 69 wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask) 70 { 71 wdma_m32(dev, reg, 0, mask); 72 } 73 74 static void 75 wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) 76 { 77 wdma_m32(dev, reg, mask, 0); 78 } 79 80 static u32 81 wifi_r32(struct mtk_wed_device *dev, u32 reg) 82 { 83 return readl(dev->wlan.base + reg); 84 } 85 86 static void 87 wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val) 88 { 89 writel(val, dev->wlan.base + reg); 90 } 91 92 static u32 93 mtk_wed_read_reset(struct mtk_wed_device *dev) 94 { 95 return wed_r32(dev, MTK_WED_RESET); 96 } 97 98 static u32 99 mtk_wdma_read_reset(struct mtk_wed_device *dev) 100 { 101 return wdma_r32(dev, MTK_WDMA_GLO_CFG); 102 } 103 104 static int 105 mtk_wdma_rx_reset(struct mtk_wed_device *dev) 106 { 107 u32 status, mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY; 108 int i, ret; 109 110 wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN); 111 ret = readx_poll_timeout(mtk_wdma_read_reset, dev, status, 112 !(status & mask), 0, 10000); 113 if (ret) 114 dev_err(dev->hw->dev, "rx reset failed\n"); 115 116 wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); 117 wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); 118 119 for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) { 120 if (dev->rx_wdma[i].desc) 121 continue; 122 123 wdma_w32(dev, 124 MTK_WDMA_RING_RX(i) + MTK_WED_RING_OFS_CPU_IDX, 0); 125 } 126 127 return ret; 128 } 129 130 static void 131 mtk_wdma_tx_reset(struct mtk_wed_device *dev) 132 { 133 u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY; 134 int i; 135 136 wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN); 137 if (readx_poll_timeout(mtk_wdma_read_reset, dev, status, 138 !(status & mask), 0, 10000)) 139 
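	/* the 10 ms poll above expired: WDMA tx is still reported busy */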
dev_err(dev->hw->dev, "tx reset failed\n"); 140 141 wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX); 142 wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); 143 144 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) 145 wdma_w32(dev, 146 MTK_WDMA_RING_TX(i) + MTK_WED_RING_OFS_CPU_IDX, 0); 147 } 148 149 static void 150 mtk_wed_reset(struct mtk_wed_device *dev, u32 mask) 151 { 152 u32 status; 153 154 wed_w32(dev, MTK_WED_RESET, mask); 155 if (readx_poll_timeout(mtk_wed_read_reset, dev, status, 156 !(status & mask), 0, 1000)) 157 WARN_ON_ONCE(1); 158 } 159 160 static u32 161 mtk_wed_wo_read_status(struct mtk_wed_device *dev) 162 { 163 return wed_r32(dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_WO_STATUS); 164 } 165 166 static void 167 mtk_wed_wo_reset(struct mtk_wed_device *dev) 168 { 169 struct mtk_wed_wo *wo = dev->hw->wed_wo; 170 u8 state = MTK_WED_WO_STATE_DISABLE; 171 void __iomem *reg; 172 u32 val; 173 174 mtk_wdma_tx_reset(dev); 175 mtk_wed_reset(dev, MTK_WED_RESET_WED); 176 177 if (mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, 178 MTK_WED_WO_CMD_CHANGE_STATE, &state, 179 sizeof(state), false)) 180 return; 181 182 if (readx_poll_timeout(mtk_wed_wo_read_status, dev, val, 183 val == MTK_WED_WOIF_DISABLE_DONE, 184 100, MTK_WOCPU_TIMEOUT)) 185 dev_err(dev->hw->dev, "failed to disable wed-wo\n"); 186 187 reg = ioremap(MTK_WED_WO_CPU_MCUSYS_RESET_ADDR, 4); 188 189 val = readl(reg); 190 switch (dev->hw->index) { 191 case 0: 192 val |= MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK; 193 writel(val, reg); 194 val &= ~MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK; 195 writel(val, reg); 196 break; 197 case 1: 198 val |= MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK; 199 writel(val, reg); 200 val &= ~MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK; 201 writel(val, reg); 202 break; 203 default: 204 break; 205 } 206 iounmap(reg); 207 } 208 209 void mtk_wed_fe_reset(void) 210 { 211 int i; 212 213 mutex_lock(&hw_lock); 214 215 for (i = 0; i < ARRAY_SIZE(hw_list); i++) { 216 struct mtk_wed_hw *hw = hw_list[i]; 217 struct mtk_wed_device *dev = hw->wed_dev; 218 int err; 219 220 if (!dev || !dev->wlan.reset) 221 continue; 222 223 /* reset callback blocks until WLAN reset is completed */ 224 err = dev->wlan.reset(dev); 225 if (err) 226 dev_err(dev->dev, "wlan reset failed: %d\n", err); 227 } 228 229 mutex_unlock(&hw_lock); 230 } 231 232 void mtk_wed_fe_reset_complete(void) 233 { 234 int i; 235 236 mutex_lock(&hw_lock); 237 238 for (i = 0; i < ARRAY_SIZE(hw_list); i++) { 239 struct mtk_wed_hw *hw = hw_list[i]; 240 struct mtk_wed_device *dev = hw->wed_dev; 241 242 if (!dev || !dev->wlan.reset_complete) 243 continue; 244 245 dev->wlan.reset_complete(dev); 246 } 247 248 mutex_unlock(&hw_lock); 249 } 250 251 static struct mtk_wed_hw * 252 mtk_wed_assign(struct mtk_wed_device *dev) 253 { 254 struct mtk_wed_hw *hw; 255 int i; 256 257 if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) { 258 hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)]; 259 if (!hw) 260 return NULL; 261 262 if (!hw->wed_dev) 263 goto out; 264 265 if (hw->version == 1) 266 return NULL; 267 268 /* MT7986 WED devices do not have any pcie slot restrictions */ 269 } 270 /* MT7986 PCIE or AXI */ 271 for (i = 0; i < ARRAY_SIZE(hw_list); i++) { 272 hw = hw_list[i]; 273 if (hw && !hw->wed_dev) 274 goto out; 275 } 276 277 return NULL; 278 279 out: 280 hw->wed_dev = dev; 281 return hw; 282 } 283 284 static int 285 mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev) 286 { 287 struct mtk_wdma_desc *desc; 288 dma_addr_t desc_phys; 289 void **page_list; 290 int token = dev->wlan.token_start; 291 int ring_size; 
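	/* the tx buffer pool is carved out of whole pages: nbuf is rounded
	 * down to a multiple of MTK_WED_BUF_PER_PAGE and every mapped page is
	 * split into MTK_WED_BUF_SIZE slots described by the ring below
	 */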
292 int n_pages; 293 int i, page_idx; 294 295 ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); 296 n_pages = ring_size / MTK_WED_BUF_PER_PAGE; 297 298 page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL); 299 if (!page_list) 300 return -ENOMEM; 301 302 dev->tx_buf_ring.size = ring_size; 303 dev->tx_buf_ring.pages = page_list; 304 305 desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc), 306 &desc_phys, GFP_KERNEL); 307 if (!desc) 308 return -ENOMEM; 309 310 dev->tx_buf_ring.desc = desc; 311 dev->tx_buf_ring.desc_phys = desc_phys; 312 313 for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) { 314 dma_addr_t page_phys, buf_phys; 315 struct page *page; 316 void *buf; 317 int s; 318 319 page = __dev_alloc_pages(GFP_KERNEL, 0); 320 if (!page) 321 return -ENOMEM; 322 323 page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE, 324 DMA_BIDIRECTIONAL); 325 if (dma_mapping_error(dev->hw->dev, page_phys)) { 326 __free_page(page); 327 return -ENOMEM; 328 } 329 330 page_list[page_idx++] = page; 331 dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE, 332 DMA_BIDIRECTIONAL); 333 334 buf = page_to_virt(page); 335 buf_phys = page_phys; 336 337 for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) { 338 u32 txd_size; 339 u32 ctrl; 340 341 txd_size = dev->wlan.init_buf(buf, buf_phys, token++); 342 343 desc->buf0 = cpu_to_le32(buf_phys); 344 desc->buf1 = cpu_to_le32(buf_phys + txd_size); 345 346 if (dev->hw->version == 1) 347 ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) | 348 FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, 349 MTK_WED_BUF_SIZE - txd_size) | 350 MTK_WDMA_DESC_CTRL_LAST_SEG1; 351 else 352 ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) | 353 FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2, 354 MTK_WED_BUF_SIZE - txd_size) | 355 MTK_WDMA_DESC_CTRL_LAST_SEG0; 356 desc->ctrl = cpu_to_le32(ctrl); 357 desc->info = 0; 358 desc++; 359 360 buf += MTK_WED_BUF_SIZE; 361 buf_phys += MTK_WED_BUF_SIZE; 362 } 363 364 dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE, 365 DMA_BIDIRECTIONAL); 366 } 367 368 return 0; 369 } 370 371 static void 372 mtk_wed_free_tx_buffer(struct mtk_wed_device *dev) 373 { 374 struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc; 375 void **page_list = dev->tx_buf_ring.pages; 376 int page_idx; 377 int i; 378 379 if (!page_list) 380 return; 381 382 if (!desc) 383 goto free_pagelist; 384 385 for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size; 386 i += MTK_WED_BUF_PER_PAGE) { 387 void *page = page_list[page_idx++]; 388 dma_addr_t buf_addr; 389 390 if (!page) 391 break; 392 393 buf_addr = le32_to_cpu(desc[i].buf0); 394 dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE, 395 DMA_BIDIRECTIONAL); 396 __free_page(page); 397 } 398 399 dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc), 400 desc, dev->tx_buf_ring.desc_phys); 401 402 free_pagelist: 403 kfree(page_list); 404 } 405 406 static int 407 mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev) 408 { 409 struct mtk_rxbm_desc *desc; 410 dma_addr_t desc_phys; 411 412 dev->rx_buf_ring.size = dev->wlan.rx_nbuf; 413 desc = dma_alloc_coherent(dev->hw->dev, 414 dev->wlan.rx_nbuf * sizeof(*desc), 415 &desc_phys, GFP_KERNEL); 416 if (!desc) 417 return -ENOMEM; 418 419 dev->rx_buf_ring.desc = desc; 420 dev->rx_buf_ring.desc_phys = desc_phys; 421 dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt); 422 423 return 0; 424 } 425 426 static void 427 mtk_wed_free_rx_buffer(struct mtk_wed_device *dev) 428 { 429 struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc; 430 431 if (!desc) 432 return; 433 
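	/* let the WLAN driver release the rx buffers it attached before the
	 * descriptor ring that references them is freed
	 */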
434 dev->wlan.release_rx_buf(dev); 435 dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc), 436 desc, dev->rx_buf_ring.desc_phys); 437 } 438 439 static void 440 mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev) 441 { 442 wed_w32(dev, MTK_WED_RX_BM_RX_DMAD, 443 FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_size)); 444 wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys); 445 wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL | 446 FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_npkt)); 447 wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH, 448 FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff)); 449 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN); 450 } 451 452 static void 453 mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring) 454 { 455 if (!ring->desc) 456 return; 457 458 dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size, 459 ring->desc, ring->desc_phys); 460 } 461 462 static void 463 mtk_wed_free_rx_rings(struct mtk_wed_device *dev) 464 { 465 mtk_wed_free_rx_buffer(dev); 466 mtk_wed_free_ring(dev, &dev->rro.ring); 467 } 468 469 static void 470 mtk_wed_free_tx_rings(struct mtk_wed_device *dev) 471 { 472 int i; 473 474 for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) 475 mtk_wed_free_ring(dev, &dev->tx_ring[i]); 476 for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) 477 mtk_wed_free_ring(dev, &dev->rx_wdma[i]); 478 } 479 480 static void 481 mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en) 482 { 483 u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; 484 485 if (dev->hw->version == 1) 486 mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR; 487 else 488 mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | 489 MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | 490 MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | 491 MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR; 492 493 if (!dev->hw->num_flows) 494 mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; 495 496 wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? 
mask : 0); 497 wed_r32(dev, MTK_WED_EXT_INT_MASK); 498 } 499 500 static void 501 mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable) 502 { 503 if (enable) { 504 wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR); 505 wed_w32(dev, MTK_WED_TXP_DW1, 506 FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103)); 507 } else { 508 wed_w32(dev, MTK_WED_TXP_DW1, 509 FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100)); 510 wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR); 511 } 512 } 513 514 #define MTK_WFMDA_RX_DMA_EN BIT(2) 515 static void 516 mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx) 517 { 518 u32 val; 519 int i; 520 521 if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED)) 522 return; /* queue is not configured by mt76 */ 523 524 for (i = 0; i < 3; i++) { 525 u32 cur_idx; 526 527 cur_idx = wed_r32(dev, 528 MTK_WED_WPDMA_RING_RX_DATA(idx) + 529 MTK_WED_RING_OFS_CPU_IDX); 530 if (cur_idx == MTK_WED_RX_RING_SIZE - 1) 531 break; 532 533 usleep_range(100000, 200000); 534 } 535 536 if (i == 3) { 537 dev_err(dev->hw->dev, "rx dma enable failed\n"); 538 return; 539 } 540 541 val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) | 542 MTK_WFMDA_RX_DMA_EN; 543 wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val); 544 } 545 546 static void 547 mtk_wed_dma_disable(struct mtk_wed_device *dev) 548 { 549 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, 550 MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | 551 MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); 552 553 wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); 554 555 wed_clr(dev, MTK_WED_GLO_CFG, 556 MTK_WED_GLO_CFG_TX_DMA_EN | 557 MTK_WED_GLO_CFG_RX_DMA_EN); 558 559 wdma_clr(dev, MTK_WDMA_GLO_CFG, 560 MTK_WDMA_GLO_CFG_TX_DMA_EN | 561 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | 562 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); 563 564 if (dev->hw->version == 1) { 565 regmap_write(dev->hw->mirror, dev->hw->index * 4, 0); 566 wdma_clr(dev, MTK_WDMA_GLO_CFG, 567 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); 568 } else { 569 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, 570 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | 571 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); 572 573 wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, 574 MTK_WED_WPDMA_RX_D_RX_DRV_EN); 575 wed_clr(dev, MTK_WED_WDMA_GLO_CFG, 576 MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); 577 } 578 579 mtk_wed_set_512_support(dev, false); 580 } 581 582 static void 583 mtk_wed_stop(struct mtk_wed_device *dev) 584 { 585 mtk_wed_set_ext_int(dev, false); 586 587 wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0); 588 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0); 589 wdma_w32(dev, MTK_WDMA_INT_MASK, 0); 590 wdma_w32(dev, MTK_WDMA_INT_GRP2, 0); 591 wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0); 592 593 if (dev->hw->version == 1) 594 return; 595 596 wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0); 597 wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0); 598 } 599 600 static void 601 mtk_wed_deinit(struct mtk_wed_device *dev) 602 { 603 mtk_wed_stop(dev); 604 mtk_wed_dma_disable(dev); 605 606 wed_clr(dev, MTK_WED_CTRL, 607 MTK_WED_CTRL_WDMA_INT_AGENT_EN | 608 MTK_WED_CTRL_WPDMA_INT_AGENT_EN | 609 MTK_WED_CTRL_WED_TX_BM_EN | 610 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); 611 612 if (dev->hw->version == 1) 613 return; 614 615 wed_clr(dev, MTK_WED_CTRL, 616 MTK_WED_CTRL_RX_ROUTE_QM_EN | 617 MTK_WED_CTRL_WED_RX_BM_EN | 618 MTK_WED_CTRL_RX_RRO_QM_EN); 619 } 620 621 static void 622 __mtk_wed_detach(struct mtk_wed_device *dev) 623 { 624 struct mtk_wed_hw *hw = dev->hw; 625 626 mtk_wed_deinit(dev); 627 628 mtk_wdma_rx_reset(dev); 629 mtk_wed_reset(dev, MTK_WED_RESET_WED); 630 
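	/* free tx buffers and rings; rx rings and the WO firmware state are
	 * only torn down below when rx offload is supported
	 */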
mtk_wed_free_tx_buffer(dev); 631 mtk_wed_free_tx_rings(dev); 632 633 if (mtk_wed_get_rx_capa(dev)) { 634 if (hw->wed_wo) 635 mtk_wed_wo_reset(dev); 636 mtk_wed_free_rx_rings(dev); 637 if (hw->wed_wo) 638 mtk_wed_wo_deinit(hw); 639 } 640 641 if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) { 642 struct device_node *wlan_node; 643 644 wlan_node = dev->wlan.pci_dev->dev.of_node; 645 if (of_dma_is_coherent(wlan_node) && hw->hifsys) 646 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, 647 BIT(hw->index), BIT(hw->index)); 648 } 649 650 if (!hw_list[!hw->index]->wed_dev && 651 hw->eth->dma_dev != hw->eth->dev) 652 mtk_eth_set_dma_device(hw->eth, hw->eth->dev); 653 654 memset(dev, 0, sizeof(*dev)); 655 module_put(THIS_MODULE); 656 657 hw->wed_dev = NULL; 658 } 659 660 static void 661 mtk_wed_detach(struct mtk_wed_device *dev) 662 { 663 mutex_lock(&hw_lock); 664 __mtk_wed_detach(dev); 665 mutex_unlock(&hw_lock); 666 } 667 668 #define PCIE_BASE_ADDR0 0x11280000 669 static void 670 mtk_wed_bus_init(struct mtk_wed_device *dev) 671 { 672 switch (dev->wlan.bus_type) { 673 case MTK_WED_BUS_PCIE: { 674 struct device_node *np = dev->hw->eth->dev->of_node; 675 struct regmap *regs; 676 677 regs = syscon_regmap_lookup_by_phandle(np, 678 "mediatek,wed-pcie"); 679 if (IS_ERR(regs)) 680 break; 681 682 regmap_update_bits(regs, 0, BIT(0), BIT(0)); 683 684 wed_w32(dev, MTK_WED_PCIE_INT_CTRL, 685 FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2)); 686 687 /* pcie interrupt control: pola/source selection */ 688 wed_set(dev, MTK_WED_PCIE_INT_CTRL, 689 MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA | 690 FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1)); 691 wed_r32(dev, MTK_WED_PCIE_INT_CTRL); 692 693 wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180); 694 wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184); 695 696 /* pcie interrupt status trigger register */ 697 wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24)); 698 wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER); 699 700 /* pola setting */ 701 wed_set(dev, MTK_WED_PCIE_INT_CTRL, 702 MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA); 703 break; 704 } 705 case MTK_WED_BUS_AXI: 706 wed_set(dev, MTK_WED_WPDMA_INT_CTRL, 707 MTK_WED_WPDMA_INT_CTRL_SIG_SRC | 708 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0)); 709 break; 710 default: 711 break; 712 } 713 } 714 715 static void 716 mtk_wed_set_wpdma(struct mtk_wed_device *dev) 717 { 718 if (dev->hw->version == 1) { 719 wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys); 720 } else { 721 mtk_wed_bus_init(dev); 722 723 wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int); 724 wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask); 725 wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx); 726 wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree); 727 wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo); 728 wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx); 729 } 730 } 731 732 static void 733 mtk_wed_hw_init_early(struct mtk_wed_device *dev) 734 { 735 u32 mask, set; 736 737 mtk_wed_deinit(dev); 738 mtk_wed_reset(dev, MTK_WED_RESET_WED); 739 mtk_wed_set_wpdma(dev); 740 741 mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE | 742 MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE | 743 MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE; 744 set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) | 745 MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP | 746 MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY; 747 wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set); 748 749 if (dev->hw->version == 1) { 750 u32 offset = dev->hw->index ? 
0x04000400 : 0; 751 752 wdma_set(dev, MTK_WDMA_GLO_CFG, 753 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | 754 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES | 755 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); 756 757 wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset); 758 wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset); 759 wed_w32(dev, MTK_WED_PCIE_CFG_BASE, 760 MTK_PCIE_BASE(dev->hw->index)); 761 } else { 762 wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy); 763 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT); 764 wed_w32(dev, MTK_WED_WDMA_OFFSET0, 765 FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS, 766 MTK_WDMA_INT_STATUS) | 767 FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG, 768 MTK_WDMA_GLO_CFG)); 769 770 wed_w32(dev, MTK_WED_WDMA_OFFSET1, 771 FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL, 772 MTK_WDMA_RING_TX(0)) | 773 FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL, 774 MTK_WDMA_RING_RX(0))); 775 } 776 } 777 778 static int 779 mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, 780 int size) 781 { 782 ring->desc = dma_alloc_coherent(dev->hw->dev, 783 size * sizeof(*ring->desc), 784 &ring->desc_phys, GFP_KERNEL); 785 if (!ring->desc) 786 return -ENOMEM; 787 788 ring->desc_size = sizeof(*ring->desc); 789 ring->size = size; 790 memset(ring->desc, 0, size); 791 792 return 0; 793 } 794 795 #define MTK_WED_MIOD_COUNT (MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT) 796 static int 797 mtk_wed_rro_alloc(struct mtk_wed_device *dev) 798 { 799 struct reserved_mem *rmem; 800 struct device_node *np; 801 int index; 802 803 index = of_property_match_string(dev->hw->node, "memory-region-names", 804 "wo-dlm"); 805 if (index < 0) 806 return index; 807 808 np = of_parse_phandle(dev->hw->node, "memory-region", index); 809 if (!np) 810 return -ENODEV; 811 812 rmem = of_reserved_mem_lookup(np); 813 of_node_put(np); 814 815 if (!rmem) 816 return -ENODEV; 817 818 dev->rro.miod_phys = rmem->base; 819 dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys; 820 821 return mtk_wed_rro_ring_alloc(dev, &dev->rro.ring, 822 MTK_WED_RRO_QUE_CNT); 823 } 824 825 static int 826 mtk_wed_rro_cfg(struct mtk_wed_device *dev) 827 { 828 struct mtk_wed_wo *wo = dev->hw->wed_wo; 829 struct { 830 struct { 831 __le32 base; 832 __le32 cnt; 833 __le32 unit; 834 } ring[2]; 835 __le32 wed; 836 u8 version; 837 } req = { 838 .ring[0] = { 839 .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE), 840 .cnt = cpu_to_le32(MTK_WED_MIOD_CNT), 841 .unit = cpu_to_le32(MTK_WED_MIOD_ENTRY_CNT), 842 }, 843 .ring[1] = { 844 .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE + 845 MTK_WED_MIOD_COUNT), 846 .cnt = cpu_to_le32(MTK_WED_FB_CMD_CNT), 847 .unit = cpu_to_le32(4), 848 }, 849 }; 850 851 return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, 852 MTK_WED_WO_CMD_WED_CFG, 853 &req, sizeof(req), true); 854 } 855 856 static void 857 mtk_wed_rro_hw_init(struct mtk_wed_device *dev) 858 { 859 wed_w32(dev, MTK_WED_RROQM_MIOD_CFG, 860 FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) | 861 FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) | 862 FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW, 863 MTK_WED_MIOD_ENTRY_CNT >> 2)); 864 865 wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_phys); 866 wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1, 867 FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT)); 868 wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_phys); 869 wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1, 870 FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT)); 871 wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0); 872 wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.ring.desc_phys); 873 874 
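	/* reset the MIOD and feedback ring indices, then enable the RRO
	 * queue manager
	 */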
wed_set(dev, MTK_WED_RROQM_RST_IDX, 875 MTK_WED_RROQM_RST_IDX_MIOD | 876 MTK_WED_RROQM_RST_IDX_FDBK); 877 878 wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0); 879 wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT - 1); 880 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN); 881 } 882 883 static void 884 mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev) 885 { 886 wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM); 887 888 for (;;) { 889 usleep_range(100, 200); 890 if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM)) 891 break; 892 } 893 894 /* configure RX_ROUTE_QM */ 895 wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); 896 wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT); 897 wed_set(dev, MTK_WED_RTQM_GLO_CFG, 898 FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index)); 899 wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); 900 /* enable RX_ROUTE_QM */ 901 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN); 902 } 903 904 static void 905 mtk_wed_hw_init(struct mtk_wed_device *dev) 906 { 907 if (dev->init_done) 908 return; 909 910 dev->init_done = true; 911 mtk_wed_set_ext_int(dev, false); 912 wed_w32(dev, MTK_WED_TX_BM_CTRL, 913 MTK_WED_TX_BM_CTRL_PAUSE | 914 FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, 915 dev->tx_buf_ring.size / 128) | 916 FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, 917 MTK_WED_TX_RING_SIZE / 256)); 918 919 wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys); 920 921 wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE); 922 923 if (dev->hw->version == 1) { 924 wed_w32(dev, MTK_WED_TX_BM_TKID, 925 FIELD_PREP(MTK_WED_TX_BM_TKID_START, 926 dev->wlan.token_start) | 927 FIELD_PREP(MTK_WED_TX_BM_TKID_END, 928 dev->wlan.token_start + 929 dev->wlan.nbuf - 1)); 930 wed_w32(dev, MTK_WED_TX_BM_DYN_THR, 931 FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) | 932 MTK_WED_TX_BM_DYN_THR_HI); 933 } else { 934 wed_w32(dev, MTK_WED_TX_BM_TKID_V2, 935 FIELD_PREP(MTK_WED_TX_BM_TKID_START, 936 dev->wlan.token_start) | 937 FIELD_PREP(MTK_WED_TX_BM_TKID_END, 938 dev->wlan.token_start + 939 dev->wlan.nbuf - 1)); 940 wed_w32(dev, MTK_WED_TX_BM_DYN_THR, 941 FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) | 942 MTK_WED_TX_BM_DYN_THR_HI_V2); 943 wed_w32(dev, MTK_WED_TX_TKID_CTRL, 944 MTK_WED_TX_TKID_CTRL_PAUSE | 945 FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM, 946 dev->tx_buf_ring.size / 128) | 947 FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM, 948 dev->tx_buf_ring.size / 128)); 949 wed_w32(dev, MTK_WED_TX_TKID_DYN_THR, 950 FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) | 951 MTK_WED_TX_TKID_DYN_THR_HI); 952 } 953 954 mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); 955 956 if (dev->hw->version == 1) { 957 wed_set(dev, MTK_WED_CTRL, 958 MTK_WED_CTRL_WED_TX_BM_EN | 959 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); 960 } else { 961 wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE); 962 /* rx hw init */ 963 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 964 MTK_WED_WPDMA_RX_D_RST_CRX_IDX | 965 MTK_WED_WPDMA_RX_D_RST_DRV_IDX); 966 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0); 967 968 mtk_wed_rx_buffer_hw_init(dev); 969 mtk_wed_rro_hw_init(dev); 970 mtk_wed_route_qm_hw_init(dev); 971 } 972 973 wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE); 974 } 975 976 static void 977 mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx) 978 { 979 void *head = (void *)ring->desc; 980 int i; 981 982 for (i = 0; i < size; i++) { 983 struct mtk_wdma_desc *desc; 984 985 desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size); 986 desc->buf0 = 0; 987 if (tx) 988 
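			/* tx descriptors start out flagged as DMA done,
			 * rx descriptors as to-host (else branch)
			 */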
desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); 989 else 990 desc->ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST); 991 desc->buf1 = 0; 992 desc->info = 0; 993 } 994 } 995 996 static u32 997 mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) 998 { 999 return !!(wed_r32(dev, reg) & mask); 1000 } 1001 1002 static int 1003 mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) 1004 { 1005 int sleep = 15000; 1006 int timeout = 100 * sleep; 1007 u32 val; 1008 1009 return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, 1010 timeout, false, dev, reg, mask); 1011 } 1012 1013 static int 1014 mtk_wed_rx_reset(struct mtk_wed_device *dev) 1015 { 1016 struct mtk_wed_wo *wo = dev->hw->wed_wo; 1017 u8 val = MTK_WED_WO_STATE_SER_RESET; 1018 int i, ret; 1019 1020 ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, 1021 MTK_WED_WO_CMD_CHANGE_STATE, &val, 1022 sizeof(val), true); 1023 if (ret) 1024 return ret; 1025 1026 wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN); 1027 ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, 1028 MTK_WED_WPDMA_RX_D_RX_DRV_BUSY); 1029 if (ret) { 1030 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); 1031 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV); 1032 } else { 1033 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 1034 MTK_WED_WPDMA_RX_D_RST_CRX_IDX | 1035 MTK_WED_WPDMA_RX_D_RST_DRV_IDX); 1036 1037 wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, 1038 MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE | 1039 MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE); 1040 wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, 1041 MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE | 1042 MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE); 1043 1044 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0); 1045 } 1046 1047 /* reset rro qm */ 1048 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN); 1049 ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL, 1050 MTK_WED_CTRL_RX_RRO_QM_BUSY); 1051 if (ret) { 1052 mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM); 1053 } else { 1054 wed_set(dev, MTK_WED_RROQM_RST_IDX, 1055 MTK_WED_RROQM_RST_IDX_MIOD | 1056 MTK_WED_RROQM_RST_IDX_FDBK); 1057 wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0); 1058 } 1059 1060 /* reset route qm */ 1061 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN); 1062 ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL, 1063 MTK_WED_CTRL_RX_ROUTE_QM_BUSY); 1064 if (ret) 1065 mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM); 1066 else 1067 wed_set(dev, MTK_WED_RTQM_GLO_CFG, 1068 MTK_WED_RTQM_Q_RST); 1069 1070 /* reset tx wdma */ 1071 mtk_wdma_tx_reset(dev); 1072 1073 /* reset tx wdma drv */ 1074 wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN); 1075 mtk_wed_poll_busy(dev, MTK_WED_CTRL, 1076 MTK_WED_CTRL_WDMA_INT_AGENT_BUSY); 1077 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV); 1078 1079 /* reset wed rx dma */ 1080 ret = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG, 1081 MTK_WED_GLO_CFG_RX_DMA_BUSY); 1082 wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN); 1083 if (ret) { 1084 mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA); 1085 } else { 1086 struct mtk_eth *eth = dev->hw->eth; 1087 1088 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) 1089 wed_set(dev, MTK_WED_RESET_IDX, 1090 MTK_WED_RESET_IDX_RX_V2); 1091 else 1092 wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX); 1093 wed_w32(dev, MTK_WED_RESET_IDX, 0); 1094 } 1095 1096 /* reset rx bm */ 1097 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN); 1098 mtk_wed_poll_busy(dev, MTK_WED_CTRL, 1099 MTK_WED_CTRL_WED_RX_BM_BUSY); 1100 mtk_wed_reset(dev, MTK_WED_RESET_RX_BM); 1101 1102 /* wo change to enable state 
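	   (restore the WO firmware to ENABLE now that the SER reset sequence above has finished)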
*/ 1103 val = MTK_WED_WO_STATE_ENABLE; 1104 ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, 1105 MTK_WED_WO_CMD_CHANGE_STATE, &val, 1106 sizeof(val), true); 1107 if (ret) 1108 return ret; 1109 1110 /* wed_rx_ring_reset */ 1111 for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) { 1112 if (!dev->rx_ring[i].desc) 1113 continue; 1114 1115 mtk_wed_ring_reset(&dev->rx_ring[i], MTK_WED_RX_RING_SIZE, 1116 false); 1117 } 1118 mtk_wed_free_rx_buffer(dev); 1119 1120 return 0; 1121 } 1122 1123 static void 1124 mtk_wed_reset_dma(struct mtk_wed_device *dev) 1125 { 1126 bool busy = false; 1127 u32 val; 1128 int i; 1129 1130 for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) { 1131 if (!dev->tx_ring[i].desc) 1132 continue; 1133 1134 mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE, 1135 true); 1136 } 1137 1138 /* 1. reset WED tx DMA */ 1139 wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN); 1140 busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG, 1141 MTK_WED_GLO_CFG_TX_DMA_BUSY); 1142 if (busy) { 1143 mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA); 1144 } else { 1145 wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX); 1146 wed_w32(dev, MTK_WED_RESET_IDX, 0); 1147 } 1148 1149 /* 2. reset WDMA rx DMA */ 1150 busy = !!mtk_wdma_rx_reset(dev); 1151 wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); 1152 if (!busy) 1153 busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG, 1154 MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY); 1155 1156 if (busy) { 1157 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT); 1158 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV); 1159 } else { 1160 wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 1161 MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV); 1162 wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0); 1163 1164 wed_set(dev, MTK_WED_WDMA_GLO_CFG, 1165 MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); 1166 1167 wed_clr(dev, MTK_WED_WDMA_GLO_CFG, 1168 MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); 1169 } 1170 1171 /* 3. reset WED WPDMA tx */ 1172 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); 1173 1174 for (i = 0; i < 100; i++) { 1175 val = wed_r32(dev, MTK_WED_TX_BM_INTF); 1176 if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40) 1177 break; 1178 } 1179 1180 mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT); 1181 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN); 1182 mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); 1183 1184 /* 4. 
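	   poll the WPDMA tx/rx driver busy bits, then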
reset WED WPDMA tx */ 1185 busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG, 1186 MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY); 1187 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, 1188 MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | 1189 MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); 1190 if (!busy) 1191 busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG, 1192 MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY); 1193 1194 if (busy) { 1195 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); 1196 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV); 1197 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV); 1198 } else { 1199 wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 1200 MTK_WED_WPDMA_RESET_IDX_TX | 1201 MTK_WED_WPDMA_RESET_IDX_RX); 1202 wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0); 1203 } 1204 1205 dev->init_done = false; 1206 if (dev->hw->version == 1) 1207 return; 1208 1209 if (!busy) { 1210 wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_WPDMA_IDX_RX); 1211 wed_w32(dev, MTK_WED_RESET_IDX, 0); 1212 } 1213 1214 mtk_wed_rx_reset(dev); 1215 } 1216 1217 static int 1218 mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, 1219 int size, u32 desc_size, bool tx) 1220 { 1221 ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size, 1222 &ring->desc_phys, GFP_KERNEL); 1223 if (!ring->desc) 1224 return -ENOMEM; 1225 1226 ring->desc_size = desc_size; 1227 ring->size = size; 1228 mtk_wed_ring_reset(ring, size, tx); 1229 1230 return 0; 1231 } 1232 1233 static int 1234 mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size, 1235 bool reset) 1236 { 1237 u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version; 1238 struct mtk_wed_ring *wdma; 1239 1240 if (idx >= ARRAY_SIZE(dev->rx_wdma)) 1241 return -EINVAL; 1242 1243 wdma = &dev->rx_wdma[idx]; 1244 if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, 1245 desc_size, true)) 1246 return -ENOMEM; 1247 1248 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, 1249 wdma->desc_phys); 1250 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, 1251 size); 1252 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); 1253 1254 wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, 1255 wdma->desc_phys); 1256 wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, 1257 size); 1258 1259 return 0; 1260 } 1261 1262 static int 1263 mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size, 1264 bool reset) 1265 { 1266 u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version; 1267 struct mtk_wed_ring *wdma; 1268 1269 if (idx >= ARRAY_SIZE(dev->tx_wdma)) 1270 return -EINVAL; 1271 1272 wdma = &dev->tx_wdma[idx]; 1273 if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, 1274 desc_size, true)) 1275 return -ENOMEM; 1276 1277 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, 1278 wdma->desc_phys); 1279 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, 1280 size); 1281 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); 1282 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0); 1283 1284 if (reset) 1285 mtk_wed_ring_reset(wdma, MTK_WED_WDMA_RING_SIZE, true); 1286 1287 if (!idx) { 1288 wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE, 1289 wdma->desc_phys); 1290 wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_COUNT, 1291 size); 1292 wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_CPU_IDX, 1293 0); 1294 wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_DMA_IDX, 1295 0); 1296 } 1297 1298 return 0; 1299 } 1300 1301 static void 1302 
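/* resubmit wifi-offloaded packets whose PPE lookup hit an unbound entry so
 * the PPE can re-check and bind the corresponding flow entry
 */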
mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
		  u32 reason, u32 hash)
{
	struct mtk_eth *eth = dev->hw->eth;
	struct ethhdr *eh;

	if (!skb)
		return;

	if (reason != MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
		return;

	skb_set_mac_header(skb, 0);
	eh = eth_hdr(skb);
	skb->protocol = eh->h_proto;
	mtk_ppe_check_skb(eth->ppe[dev->hw->index], skb, hash);
}

static void
mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
{
	u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));

	/* wed control cr set */
	wed_set(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WED_TX_BM_EN |
		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

	if (dev->hw->version == 1) {
		wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
			MTK_WED_PCIE_INT_TRIGGER_STATUS);

		wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
			MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
			MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);

		wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
	} else {
		wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
					GENMASK(1, 0));
		/* initial tx interrupt trigger */
		wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
			MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
			MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
			MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN |
			MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG,
				   dev->wlan.tx_tbit[0]) |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
				   dev->wlan.tx_tbit[1]));

		/* initial txfree interrupt trigger */
		wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
			MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
			MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
				   dev->wlan.txfree_tbit));

		wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
			MTK_WED_WPDMA_INT_CTRL_RX0_EN |
			MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
			MTK_WED_WPDMA_INT_CTRL_RX1_EN |
			MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
				   dev->wlan.rx_tbit[0]) |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
				   dev->wlan.rx_tbit[1]));

		wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
		wed_set(dev, MTK_WED_WDMA_INT_CTRL,
			FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
				   dev->wdma_idx));
	}

	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);

	wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
	wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
	wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
}

static void
mtk_wed_dma_enable(struct mtk_wed_device *dev)
{
	wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);

	wed_set(dev, MTK_WED_GLO_CFG,
		MTK_WED_GLO_CFG_TX_DMA_EN |
		MTK_WED_GLO_CFG_RX_DMA_EN);
	wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
	wed_set(dev, MTK_WED_WDMA_GLO_CFG,
		MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);

	wdma_set(dev, MTK_WDMA_GLO_CFG,
		 MTK_WDMA_GLO_CFG_TX_DMA_EN |
		 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
		 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);

	if (dev->hw->version == 1) {
		wdma_set(dev, MTK_WDMA_GLO_CFG,
			 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
	} else {
		int i;

		wed_set(dev,
MTK_WED_WPDMA_CTRL, 1412 MTK_WED_WPDMA_CTRL_SDL1_FIXED); 1413 1414 wed_set(dev, MTK_WED_WDMA_GLO_CFG, 1415 MTK_WED_WDMA_GLO_CFG_TX_DRV_EN | 1416 MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); 1417 1418 wed_set(dev, MTK_WED_WPDMA_GLO_CFG, 1419 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | 1420 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); 1421 1422 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, 1423 MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP | 1424 MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV); 1425 1426 wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, 1427 MTK_WED_WPDMA_RX_D_RX_DRV_EN | 1428 FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) | 1429 FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL, 1430 0x2)); 1431 1432 for (i = 0; i < MTK_WED_RX_QUEUES; i++) 1433 mtk_wed_check_wfdma_rx_fill(dev, i); 1434 } 1435 } 1436 1437 static void 1438 mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) 1439 { 1440 int i; 1441 1442 if (mtk_wed_get_rx_capa(dev) && mtk_wed_rx_buffer_alloc(dev)) 1443 return; 1444 1445 for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) 1446 if (!dev->rx_wdma[i].desc) 1447 mtk_wed_wdma_rx_ring_setup(dev, i, 16, false); 1448 1449 mtk_wed_hw_init(dev); 1450 mtk_wed_configure_irq(dev, irq_mask); 1451 1452 mtk_wed_set_ext_int(dev, true); 1453 1454 if (dev->hw->version == 1) { 1455 u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN | 1456 FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, 1457 dev->hw->index); 1458 1459 val |= BIT(0) | (BIT(1) * !!dev->hw->index); 1460 regmap_write(dev->hw->mirror, dev->hw->index * 4, val); 1461 } else { 1462 /* driver set mid ready and only once */ 1463 wed_w32(dev, MTK_WED_EXT_INT_MASK1, 1464 MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); 1465 wed_w32(dev, MTK_WED_EXT_INT_MASK2, 1466 MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); 1467 1468 wed_r32(dev, MTK_WED_EXT_INT_MASK1); 1469 wed_r32(dev, MTK_WED_EXT_INT_MASK2); 1470 1471 if (mtk_wed_rro_cfg(dev)) 1472 return; 1473 1474 } 1475 1476 mtk_wed_set_512_support(dev, dev->wlan.wcid_512); 1477 1478 mtk_wed_dma_enable(dev); 1479 dev->running = true; 1480 } 1481 1482 static int 1483 mtk_wed_attach(struct mtk_wed_device *dev) 1484 __releases(RCU) 1485 { 1486 struct mtk_wed_hw *hw; 1487 struct device *device; 1488 int ret = 0; 1489 1490 RCU_LOCKDEP_WARN(!rcu_read_lock_held(), 1491 "mtk_wed_attach without holding the RCU read lock"); 1492 1493 if ((dev->wlan.bus_type == MTK_WED_BUS_PCIE && 1494 pci_domain_nr(dev->wlan.pci_dev->bus) > 1) || 1495 !try_module_get(THIS_MODULE)) 1496 ret = -ENODEV; 1497 1498 rcu_read_unlock(); 1499 1500 if (ret) 1501 return ret; 1502 1503 mutex_lock(&hw_lock); 1504 1505 hw = mtk_wed_assign(dev); 1506 if (!hw) { 1507 module_put(THIS_MODULE); 1508 ret = -ENODEV; 1509 goto unlock; 1510 } 1511 1512 device = dev->wlan.bus_type == MTK_WED_BUS_PCIE 1513 ? 
&dev->wlan.pci_dev->dev 1514 : &dev->wlan.platform_dev->dev; 1515 dev_info(device, "attaching wed device %d version %d\n", 1516 hw->index, hw->version); 1517 1518 dev->hw = hw; 1519 dev->dev = hw->dev; 1520 dev->irq = hw->irq; 1521 dev->wdma_idx = hw->index; 1522 dev->version = hw->version; 1523 1524 if (hw->eth->dma_dev == hw->eth->dev && 1525 of_dma_is_coherent(hw->eth->dev->of_node)) 1526 mtk_eth_set_dma_device(hw->eth, hw->dev); 1527 1528 ret = mtk_wed_tx_buffer_alloc(dev); 1529 if (ret) 1530 goto out; 1531 1532 if (mtk_wed_get_rx_capa(dev)) { 1533 ret = mtk_wed_rro_alloc(dev); 1534 if (ret) 1535 goto out; 1536 } 1537 1538 mtk_wed_hw_init_early(dev); 1539 if (hw->version == 1) { 1540 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, 1541 BIT(hw->index), 0); 1542 } else { 1543 dev->rev_id = wed_r32(dev, MTK_WED_REV_ID); 1544 ret = mtk_wed_wo_init(hw); 1545 } 1546 out: 1547 if (ret) { 1548 dev_err(dev->hw->dev, "failed to attach wed device\n"); 1549 __mtk_wed_detach(dev); 1550 } 1551 unlock: 1552 mutex_unlock(&hw_lock); 1553 1554 return ret; 1555 } 1556 1557 static int 1558 mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs, 1559 bool reset) 1560 { 1561 struct mtk_wed_ring *ring = &dev->tx_ring[idx]; 1562 1563 /* 1564 * Tx ring redirection: 1565 * Instead of configuring the WLAN PDMA TX ring directly, the WLAN 1566 * driver allocated DMA ring gets configured into WED MTK_WED_RING_TX(n) 1567 * registers. 1568 * 1569 * WED driver posts its own DMA ring as WLAN PDMA TX and configures it 1570 * into MTK_WED_WPDMA_RING_TX(n) registers. 1571 * It gets filled with packets picked up from WED TX ring and from 1572 * WDMA RX. 1573 */ 1574 1575 if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring))) 1576 return -EINVAL; 1577 1578 if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, 1579 sizeof(*ring->desc), true)) 1580 return -ENOMEM; 1581 1582 if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, 1583 reset)) 1584 return -ENOMEM; 1585 1586 ring->reg_base = MTK_WED_RING_TX(idx); 1587 ring->wpdma = regs; 1588 1589 /* WED -> WPDMA */ 1590 wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); 1591 wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE); 1592 wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0); 1593 1594 wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, 1595 ring->desc_phys); 1596 wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, 1597 MTK_WED_TX_RING_SIZE); 1598 wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); 1599 1600 return 0; 1601 } 1602 1603 static int 1604 mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) 1605 { 1606 struct mtk_wed_ring *ring = &dev->txfree_ring; 1607 int i, index = dev->hw->version == 1; 1608 1609 /* 1610 * For txfree event handling, the same DMA ring is shared between WED 1611 * and WLAN. 
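	 * WED mirrors the base, count and cpu index registers supplied by
	 * the WLAN driver (the three u32 copies in the loop below).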
The WLAN driver accesses the ring index registers through 1612 * WED 1613 */ 1614 ring->reg_base = MTK_WED_RING_RX(index); 1615 ring->wpdma = regs; 1616 1617 for (i = 0; i < 12; i += 4) { 1618 u32 val = readl(regs + i); 1619 1620 wed_w32(dev, MTK_WED_RING_RX(index) + i, val); 1621 wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val); 1622 } 1623 1624 return 0; 1625 } 1626 1627 static int 1628 mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs, 1629 bool reset) 1630 { 1631 struct mtk_wed_ring *ring = &dev->rx_ring[idx]; 1632 1633 if (WARN_ON(idx >= ARRAY_SIZE(dev->rx_ring))) 1634 return -EINVAL; 1635 1636 if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE, 1637 sizeof(*ring->desc), false)) 1638 return -ENOMEM; 1639 1640 if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, 1641 reset)) 1642 return -ENOMEM; 1643 1644 ring->reg_base = MTK_WED_RING_RX_DATA(idx); 1645 ring->wpdma = regs; 1646 ring->flags |= MTK_WED_RING_CONFIGURED; 1647 1648 /* WPDMA -> WED */ 1649 wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); 1650 wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE); 1651 1652 wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE, 1653 ring->desc_phys); 1654 wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT, 1655 MTK_WED_RX_RING_SIZE); 1656 1657 return 0; 1658 } 1659 1660 static u32 1661 mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask) 1662 { 1663 u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; 1664 1665 if (dev->hw->version == 1) 1666 ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR; 1667 else 1668 ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | 1669 MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | 1670 MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | 1671 MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR; 1672 1673 val = wed_r32(dev, MTK_WED_EXT_INT_STATUS); 1674 wed_w32(dev, MTK_WED_EXT_INT_STATUS, val); 1675 val &= ext_mask; 1676 if (!dev->hw->num_flows) 1677 val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; 1678 if (val && net_ratelimit()) 1679 pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val); 1680 1681 val = wed_r32(dev, MTK_WED_INT_STATUS); 1682 val &= mask; 1683 wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */ 1684 1685 return val; 1686 } 1687 1688 static void 1689 mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask) 1690 { 1691 if (!dev->running) 1692 return; 1693 1694 mtk_wed_set_ext_int(dev, !!mask); 1695 wed_w32(dev, MTK_WED_INT_MASK, mask); 1696 } 1697 1698 int mtk_wed_flow_add(int index) 1699 { 1700 struct mtk_wed_hw *hw = hw_list[index]; 1701 int ret; 1702 1703 if (!hw || !hw->wed_dev) 1704 return -ENODEV; 1705 1706 if (hw->num_flows) { 1707 hw->num_flows++; 1708 return 0; 1709 } 1710 1711 mutex_lock(&hw_lock); 1712 if (!hw->wed_dev) { 1713 ret = -ENODEV; 1714 goto out; 1715 } 1716 1717 ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev); 1718 if (!ret) 1719 hw->num_flows++; 1720 mtk_wed_set_ext_int(hw->wed_dev, true); 1721 1722 out: 1723 mutex_unlock(&hw_lock); 1724 1725 return ret; 1726 } 1727 1728 void mtk_wed_flow_remove(int index) 1729 { 1730 struct mtk_wed_hw *hw = hw_list[index]; 1731 1732 if (!hw) 1733 return; 1734 1735 if (--hw->num_flows) 1736 return; 1737 1738 mutex_lock(&hw_lock); 1739 if (!hw->wed_dev) 1740 goto out; 1741 1742 hw->wed_dev->wlan.offload_disable(hw->wed_dev); 1743 mtk_wed_set_ext_int(hw->wed_dev, true); 1744 1745 out: 1746 mutex_unlock(&hw_lock); 1747 } 1748 1749 void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, 
1750 void __iomem *wdma, phys_addr_t wdma_phy, 1751 int index) 1752 { 1753 static const struct mtk_wed_ops wed_ops = { 1754 .attach = mtk_wed_attach, 1755 .tx_ring_setup = mtk_wed_tx_ring_setup, 1756 .rx_ring_setup = mtk_wed_rx_ring_setup, 1757 .txfree_ring_setup = mtk_wed_txfree_ring_setup, 1758 .msg_update = mtk_wed_mcu_msg_update, 1759 .start = mtk_wed_start, 1760 .stop = mtk_wed_stop, 1761 .reset_dma = mtk_wed_reset_dma, 1762 .reg_read = wed_r32, 1763 .reg_write = wed_w32, 1764 .irq_get = mtk_wed_irq_get, 1765 .irq_set_mask = mtk_wed_irq_set_mask, 1766 .detach = mtk_wed_detach, 1767 .ppe_check = mtk_wed_ppe_check, 1768 }; 1769 struct device_node *eth_np = eth->dev->of_node; 1770 struct platform_device *pdev; 1771 struct mtk_wed_hw *hw; 1772 struct regmap *regs; 1773 int irq; 1774 1775 if (!np) 1776 return; 1777 1778 pdev = of_find_device_by_node(np); 1779 if (!pdev) 1780 goto err_of_node_put; 1781 1782 get_device(&pdev->dev); 1783 irq = platform_get_irq(pdev, 0); 1784 if (irq < 0) 1785 goto err_put_device; 1786 1787 regs = syscon_regmap_lookup_by_phandle(np, NULL); 1788 if (IS_ERR(regs)) 1789 goto err_put_device; 1790 1791 rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops); 1792 1793 mutex_lock(&hw_lock); 1794 1795 if (WARN_ON(hw_list[index])) 1796 goto unlock; 1797 1798 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 1799 if (!hw) 1800 goto unlock; 1801 1802 hw->node = np; 1803 hw->regs = regs; 1804 hw->eth = eth; 1805 hw->dev = &pdev->dev; 1806 hw->wdma_phy = wdma_phy; 1807 hw->wdma = wdma; 1808 hw->index = index; 1809 hw->irq = irq; 1810 hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1; 1811 1812 if (hw->version == 1) { 1813 hw->mirror = syscon_regmap_lookup_by_phandle(eth_np, 1814 "mediatek,pcie-mirror"); 1815 hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np, 1816 "mediatek,hifsys"); 1817 if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) { 1818 kfree(hw); 1819 goto unlock; 1820 } 1821 1822 if (!index) { 1823 regmap_write(hw->mirror, 0, 0); 1824 regmap_write(hw->mirror, 4, 0); 1825 } 1826 } 1827 1828 mtk_wed_hw_add_debugfs(hw); 1829 1830 hw_list[index] = hw; 1831 1832 mutex_unlock(&hw_lock); 1833 1834 return; 1835 1836 unlock: 1837 mutex_unlock(&hw_lock); 1838 err_put_device: 1839 put_device(&pdev->dev); 1840 err_of_node_put: 1841 of_node_put(np); 1842 } 1843 1844 void mtk_wed_exit(void) 1845 { 1846 int i; 1847 1848 rcu_assign_pointer(mtk_soc_wed_ops, NULL); 1849 1850 synchronize_rcu(); 1851 1852 for (i = 0; i < ARRAY_SIZE(hw_list); i++) { 1853 struct mtk_wed_hw *hw; 1854 1855 hw = hw_list[i]; 1856 if (!hw) 1857 continue; 1858 1859 hw_list[i] = NULL; 1860 debugfs_remove(hw->debugfs_dir); 1861 put_device(hw->dev); 1862 of_node_put(hw->node); 1863 kfree(hw); 1864 } 1865 } 1866