// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/mfd/syscon.h>
#include <linux/debugfs.h>
#include <linux/soc/mediatek/mtk_wed.h>
#include "mtk_eth_soc.h"
#include "mtk_wed_regs.h"
#include "mtk_wed.h"
#include "mtk_ppe.h"
#include "mtk_wed_wo.h"

#define MTK_PCIE_BASE(n)		(0x1a143000 + (n) * 0x2000)

#define MTK_WED_PKT_SIZE		1900
#define MTK_WED_BUF_SIZE		2048
#define MTK_WED_BUF_PER_PAGE		(PAGE_SIZE / 2048)
#define MTK_WED_RX_RING_SIZE		1536

#define MTK_WED_TX_RING_SIZE		2048
#define MTK_WED_WDMA_RING_SIZE		1024
#define MTK_WED_MAX_GROUP_SIZE		0x100
#define MTK_WED_VLD_GROUP_SIZE		0x40
#define MTK_WED_PER_GROUP_PKT		128

#define MTK_WED_FBUF_SIZE		128
#define MTK_WED_MIOD_CNT		16
#define MTK_WED_FB_CMD_CNT		1024
#define MTK_WED_RRO_QUE_CNT		8192
#define MTK_WED_MIOD_ENTRY_CNT		128

static struct mtk_wed_hw *hw_list[2];
static DEFINE_MUTEX(hw_lock);

static void
wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
	regmap_update_bits(dev->hw->regs, reg, mask | val, val);
}

static void
wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	return wed_m32(dev, reg, 0, mask);
}

static void
wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	return wed_m32(dev, reg, mask, 0);
}

static void
wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
	wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
}

static void
wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	wdma_m32(dev, reg, 0, mask);
}

static void
wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	wdma_m32(dev, reg, mask, 0);
}

static u32
wifi_r32(struct mtk_wed_device *dev, u32 reg)
{
	return readl(dev->wlan.base + reg);
}

static void
wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
{
	writel(val, dev->wlan.base + reg);
}

static u32
mtk_wed_read_reset(struct mtk_wed_device *dev)
{
	return wed_r32(dev, MTK_WED_RESET);
}

static u32
mtk_wdma_read_reset(struct mtk_wed_device *dev)
{
	return wdma_r32(dev, MTK_WDMA_GLO_CFG);
}

static int
mtk_wdma_rx_reset(struct mtk_wed_device *dev)
{
	u32 status, mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY;
	int i, ret;

	wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN);
	ret = readx_poll_timeout(mtk_wdma_read_reset, dev, status,
				 !(status & mask), 0, 10000);
	if (ret)
		dev_err(dev->hw->dev, "rx reset failed\n");

	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);

	for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) {
		if (dev->rx_wdma[i].desc)
			continue;

		wdma_w32(dev,
			 MTK_WDMA_RING_RX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
	}

	return ret;
}

static void
mtk_wdma_tx_reset(struct mtk_wed_device *dev)
{
	u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY;
	int i;

	wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
	if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
			       !(status & mask), 0, 10000))
		dev_err(dev->hw->dev, "tx reset failed\n");

	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);

	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
		wdma_w32(dev,
			 MTK_WDMA_RING_TX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
}

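/* Trigger a reset of the WED sub-blocks selected by @mask and poll
 * MTK_WED_RESET until the hardware clears the requested bits again.
 */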
static void
mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
{
	u32 status;

	wed_w32(dev, MTK_WED_RESET, mask);
	if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
			       !(status & mask), 0, 1000))
		WARN_ON_ONCE(1);
}

static u32
mtk_wed_wo_read_status(struct mtk_wed_device *dev)
{
	return wed_r32(dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_WO_STATUS);
}

static void
mtk_wed_wo_reset(struct mtk_wed_device *dev)
{
	struct mtk_wed_wo *wo = dev->hw->wed_wo;
	u8 state = MTK_WED_WO_STATE_DISABLE;
	void __iomem *reg;
	u32 val;

	mtk_wdma_tx_reset(dev);
	mtk_wed_reset(dev, MTK_WED_RESET_WED);

	if (mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
				 MTK_WED_WO_CMD_CHANGE_STATE, &state,
				 sizeof(state), false))
		return;

	if (readx_poll_timeout(mtk_wed_wo_read_status, dev, val,
			       val == MTK_WED_WOIF_DISABLE_DONE,
			       100, MTK_WOCPU_TIMEOUT))
		dev_err(dev->hw->dev, "failed to disable wed-wo\n");

	reg = ioremap(MTK_WED_WO_CPU_MCUSYS_RESET_ADDR, 4);

	val = readl(reg);
	switch (dev->hw->index) {
	case 0:
		val |= MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK;
		writel(val, reg);
		val &= ~MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK;
		writel(val, reg);
		break;
	case 1:
		val |= MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK;
		writel(val, reg);
		val &= ~MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK;
		writel(val, reg);
		break;
	default:
		break;
	}
	iounmap(reg);
}

static struct mtk_wed_hw *
mtk_wed_assign(struct mtk_wed_device *dev)
{
	struct mtk_wed_hw *hw;
	int i;

	if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
		hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
		if (!hw)
			return NULL;

		if (!hw->wed_dev)
			goto out;

		if (hw->version == 1)
			return NULL;

		/* MT7986 WED devices do not have any pcie slot restrictions */
	}
	/* MT7986 PCIE or AXI */
	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
		hw = hw_list[i];
		if (hw && !hw->wed_dev)
			goto out;
	}

	return NULL;

out:
	hw->wed_dev = dev;
	return hw;
}

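/* Allocate the TX buffer pool handed to the WED buffer manager: round
 * dev->wlan.nbuf down to a multiple of MTK_WED_BUF_PER_PAGE, back it with
 * order-0 pages, let the WLAN driver pre-build a TXD in every buffer via
 * init_buf() and describe each buffer in a coherent array of
 * mtk_wdma_desc entries.
 */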
static int
mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
{
	struct mtk_wdma_desc *desc;
	dma_addr_t desc_phys;
	void **page_list;
	int token = dev->wlan.token_start;
	int ring_size;
	int n_pages;
	int i, page_idx;

	ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
	n_pages = ring_size / MTK_WED_BUF_PER_PAGE;

	page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	dev->tx_buf_ring.size = ring_size;
	dev->tx_buf_ring.pages = page_list;

	desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
				  &desc_phys, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	dev->tx_buf_ring.desc = desc;
	dev->tx_buf_ring.desc_phys = desc_phys;

	for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
		dma_addr_t page_phys, buf_phys;
		struct page *page;
		void *buf;
		int s;

		page = __dev_alloc_pages(GFP_KERNEL, 0);
		if (!page)
			return -ENOMEM;

		page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev->hw->dev, page_phys)) {
			__free_page(page);
			return -ENOMEM;
		}

		page_list[page_idx++] = page;
		dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
					DMA_BIDIRECTIONAL);

		buf = page_to_virt(page);
		buf_phys = page_phys;

		for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
			u32 txd_size;
			u32 ctrl;

			txd_size = dev->wlan.init_buf(buf, buf_phys, token++);

			desc->buf0 = cpu_to_le32(buf_phys);
			desc->buf1 = cpu_to_le32(buf_phys + txd_size);

			if (dev->hw->version == 1)
				ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
				       FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
						  MTK_WED_BUF_SIZE - txd_size) |
				       MTK_WDMA_DESC_CTRL_LAST_SEG1;
			else
				ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
				       FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
						  MTK_WED_BUF_SIZE - txd_size) |
				       MTK_WDMA_DESC_CTRL_LAST_SEG0;
			desc->ctrl = cpu_to_le32(ctrl);
			desc->info = 0;
			desc++;

			buf += MTK_WED_BUF_SIZE;
			buf_phys += MTK_WED_BUF_SIZE;
		}

		dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
					   DMA_BIDIRECTIONAL);
	}

	return 0;
}

static void
mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
{
	struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc;
	void **page_list = dev->tx_buf_ring.pages;
	int page_idx;
	int i;

	if (!page_list)
		return;

	if (!desc)
		goto free_pagelist;

	for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size;
	     i += MTK_WED_BUF_PER_PAGE) {
		void *page = page_list[page_idx++];
		dma_addr_t buf_addr;

		if (!page)
			break;

		buf_addr = le32_to_cpu(desc[i].buf0);
		dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(page);
	}

	dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc),
			  desc, dev->tx_buf_ring.desc_phys);

free_pagelist:
	kfree(page_list);
}

static int
mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
{
	struct mtk_rxbm_desc *desc;
	dma_addr_t desc_phys;

	dev->rx_buf_ring.size = dev->wlan.rx_nbuf;
	desc = dma_alloc_coherent(dev->hw->dev,
				  dev->wlan.rx_nbuf * sizeof(*desc),
				  &desc_phys, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	dev->rx_buf_ring.desc = desc;
	dev->rx_buf_ring.desc_phys = desc_phys;
	dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt);

	return 0;
}

static void
mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
{
	struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;

	if (!desc)
		return;

	dev->wlan.release_rx_buf(dev);
	dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc),
			  desc, dev->rx_buf_ring.desc_phys);
}

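/* Program the RX buffer manager: per-buffer SDL length, the base of the
 * rx_buf_ring descriptor array, the initial SW tail index and the dynamic
 * allocation threshold, then enable the RX BM block.
 */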
static void
mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev)
{
	wed_w32(dev, MTK_WED_RX_BM_RX_DMAD,
		FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_size));
	wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys);
	wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL |
		FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_npkt));
	wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
		FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
}

static void
mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
{
	if (!ring->desc)
		return;

	dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size,
			  ring->desc, ring->desc_phys);
}

static void
mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
{
	mtk_wed_free_rx_buffer(dev);
	mtk_wed_free_ring(dev, &dev->rro.ring);
}

static void
mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
		mtk_wed_free_ring(dev, &dev->tx_ring[i]);
	for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
		mtk_wed_free_ring(dev, &dev->rx_wdma[i]);
}

static void
mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
{
	u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;

	if (dev->hw->version == 1)
		mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
	else
		mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
			MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
			MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
			MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;

	if (!dev->hw->num_flows)
		mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;

	wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
	wed_r32(dev, MTK_WED_EXT_INT_MASK);
}

static void
mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
{
	if (enable) {
		wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
		wed_w32(dev, MTK_WED_TXP_DW1,
			FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
	} else {
		wed_w32(dev, MTK_WED_TXP_DW1,
			FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
		wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
	}
}

#define MTK_WFMDA_RX_DMA_EN	BIT(2)
static void
mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
{
	u32 val;
	int i;

	if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED))
		return; /* queue is not configured by mt76 */

	for (i = 0; i < 3; i++) {
		u32 cur_idx;

		cur_idx = wed_r32(dev,
				  MTK_WED_WPDMA_RING_RX_DATA(idx) +
				  MTK_WED_RING_OFS_CPU_IDX);
		if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
			break;

		usleep_range(100000, 200000);
	}

	if (i == 3) {
		dev_err(dev->hw->dev, "rx dma enable failed\n");
		return;
	}

	val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) |
	      MTK_WFMDA_RX_DMA_EN;
	wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val);
}

static void
mtk_wed_dma_disable(struct mtk_wed_device *dev)
{
	wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);

	wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);

	wed_clr(dev, MTK_WED_GLO_CFG,
		MTK_WED_GLO_CFG_TX_DMA_EN |
		MTK_WED_GLO_CFG_RX_DMA_EN);

	wdma_clr(dev, MTK_WDMA_GLO_CFG,
		 MTK_WDMA_GLO_CFG_TX_DMA_EN |
		 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
		 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);

	if (dev->hw->version == 1) {
		regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
		wdma_clr(dev, MTK_WDMA_GLO_CFG,
			 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
	} else {
		wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);

		wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
			MTK_WED_WPDMA_RX_D_RX_DRV_EN);
		wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
	}

	mtk_wed_set_512_support(dev, false);
}

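/* Mask every WED interrupt source (WPDMA, WDMA and, on v2 hardware, the
 * extra EXT_INT banks) without touching the DMA enable bits.
 */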
static void
mtk_wed_stop(struct mtk_wed_device *dev)
{
	mtk_wed_set_ext_int(dev, false);

	wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
	wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
	wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);

	if (dev->hw->version == 1)
		return;

	wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
	wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
}

static void
mtk_wed_deinit(struct mtk_wed_device *dev)
{
	mtk_wed_stop(dev);
	mtk_wed_dma_disable(dev);

	wed_clr(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WED_TX_BM_EN |
		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

	if (dev->hw->version == 1)
		return;

	wed_clr(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_RX_ROUTE_QM_EN |
		MTK_WED_CTRL_WED_RX_BM_EN |
		MTK_WED_CTRL_RX_RRO_QM_EN);
}

static void
__mtk_wed_detach(struct mtk_wed_device *dev)
{
	struct mtk_wed_hw *hw = dev->hw;

	mtk_wed_deinit(dev);

	mtk_wdma_rx_reset(dev);
	mtk_wed_reset(dev, MTK_WED_RESET_WED);
	mtk_wed_free_tx_buffer(dev);
	mtk_wed_free_tx_rings(dev);

	if (mtk_wed_get_rx_capa(dev)) {
		if (hw->wed_wo)
			mtk_wed_wo_reset(dev);
		mtk_wed_free_rx_rings(dev);
		if (hw->wed_wo)
			mtk_wed_wo_deinit(hw);
	}

	if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
		struct device_node *wlan_node;

		wlan_node = dev->wlan.pci_dev->dev.of_node;
		if (of_dma_is_coherent(wlan_node) && hw->hifsys)
			regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
					   BIT(hw->index), BIT(hw->index));
	}

	if (!hw_list[!hw->index]->wed_dev &&
	    hw->eth->dma_dev != hw->eth->dev)
		mtk_eth_set_dma_device(hw->eth, hw->eth->dev);

	memset(dev, 0, sizeof(*dev));
	module_put(THIS_MODULE);

	hw->wed_dev = NULL;
}

static void
mtk_wed_detach(struct mtk_wed_device *dev)
{
	mutex_lock(&hw_lock);
	__mtk_wed_detach(dev);
	mutex_unlock(&hw_lock);
}

#define PCIE_BASE_ADDR0		0x11280000
static void
mtk_wed_bus_init(struct mtk_wed_device *dev)
{
	switch (dev->wlan.bus_type) {
	case MTK_WED_BUS_PCIE: {
		struct device_node *np = dev->hw->eth->dev->of_node;
		struct regmap *regs;

		regs = syscon_regmap_lookup_by_phandle(np,
						       "mediatek,wed-pcie");
		if (IS_ERR(regs))
			break;

		regmap_update_bits(regs, 0, BIT(0), BIT(0));

		wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
			FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));

		/* pcie interrupt control: polarity/source selection */
		wed_set(dev, MTK_WED_PCIE_INT_CTRL,
			MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
			FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
		wed_r32(dev, MTK_WED_PCIE_INT_CTRL);

		wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
		wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);

		/* pcie interrupt status trigger register */
		wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
		wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);

		/* polarity setting */
		wed_set(dev, MTK_WED_PCIE_INT_CTRL,
			MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
		break;
	}
	case MTK_WED_BUS_AXI:
		wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
			MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
		break;
	default:
		break;
	}
}

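/* Tell WED where the WLAN WPDMA register regions live: a single base on
 * v1 hardware, separate int/tx/txfree/rx regions (plus the bus glue set
 * up by mtk_wed_bus_init()) on v2.
 */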
static void
mtk_wed_set_wpdma(struct mtk_wed_device *dev)
{
	if (dev->hw->version == 1) {
		wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
	} else {
		mtk_wed_bus_init(dev);

		wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
		wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
		wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
		wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
		wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
		wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
	}
}

static void
mtk_wed_hw_init_early(struct mtk_wed_device *dev)
{
	u32 mask, set;

	mtk_wed_deinit(dev);
	mtk_wed_reset(dev, MTK_WED_RESET_WED);
	mtk_wed_set_wpdma(dev);

	mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
	       MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
	       MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
	set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
	      MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
	      MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
	wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);

	if (dev->hw->version == 1) {
		u32 offset = dev->hw->index ? 0x04000400 : 0;

		wdma_set(dev, MTK_WDMA_GLO_CFG,
			 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
			 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
			 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);

		wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
		wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
		wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
			MTK_PCIE_BASE(dev->hw->index));
	} else {
		wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy);
		wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT);
		wed_w32(dev, MTK_WED_WDMA_OFFSET0,
			FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS,
				   MTK_WDMA_INT_STATUS) |
			FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG,
				   MTK_WDMA_GLO_CFG));

		wed_w32(dev, MTK_WED_WDMA_OFFSET1,
			FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL,
				   MTK_WDMA_RING_TX(0)) |
			FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
				   MTK_WDMA_RING_RX(0)));
	}
}

static int
mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
		       int size)
{
	ring->desc = dma_alloc_coherent(dev->hw->dev,
					size * sizeof(*ring->desc),
					&ring->desc_phys, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_size = sizeof(*ring->desc);
	ring->size = size;
	memset(ring->desc, 0, size);

	return 0;
}

#define MTK_WED_MIOD_COUNT	(MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT)
static int
mtk_wed_rro_alloc(struct mtk_wed_device *dev)
{
	struct reserved_mem *rmem;
	struct device_node *np;
	int index;

	index = of_property_match_string(dev->hw->node, "memory-region-names",
					 "wo-dlm");
	if (index < 0)
		return index;

	np = of_parse_phandle(dev->hw->node, "memory-region", index);
	if (!np)
		return -ENODEV;

	rmem = of_reserved_mem_lookup(np);
	of_node_put(np);

	if (!rmem)
		return -ENODEV;

	dev->rro.miod_phys = rmem->base;
	dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys;

	return mtk_wed_rro_ring_alloc(dev, &dev->rro.ring,
				      MTK_WED_RRO_QUE_CNT);
}

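/* Describe the RRO MIOD and feedback command rings to the WO firmware
 * (base, entry count and entry size as seen from the WO CPU) via a
 * MTK_WED_WO_CMD_WED_CFG MCU message.
 */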
static int
mtk_wed_rro_cfg(struct mtk_wed_device *dev)
{
	struct mtk_wed_wo *wo = dev->hw->wed_wo;
	struct {
		struct {
			__le32 base;
			__le32 cnt;
			__le32 unit;
		} ring[2];
		__le32 wed;
		u8 version;
	} req = {
		.ring[0] = {
			.base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE),
			.cnt = cpu_to_le32(MTK_WED_MIOD_CNT),
			.unit = cpu_to_le32(MTK_WED_MIOD_ENTRY_CNT),
		},
		.ring[1] = {
			.base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE +
					    MTK_WED_MIOD_COUNT),
			.cnt = cpu_to_le32(MTK_WED_FB_CMD_CNT),
			.unit = cpu_to_le32(4),
		},
	};

	return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
				    MTK_WED_WO_CMD_WED_CFG,
				    &req, sizeof(req), true);
}

static void
mtk_wed_rro_hw_init(struct mtk_wed_device *dev)
{
	wed_w32(dev, MTK_WED_RROQM_MIOD_CFG,
		FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) |
		FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) |
		FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW,
			   MTK_WED_MIOD_ENTRY_CNT >> 2));

	wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_phys);
	wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1,
		FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT));
	wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_phys);
	wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1,
		FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT));
	wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0);
	wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.ring.desc_phys);

	wed_set(dev, MTK_WED_RROQM_RST_IDX,
		MTK_WED_RROQM_RST_IDX_MIOD |
		MTK_WED_RROQM_RST_IDX_FDBK);

	wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
	wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT - 1);
	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
}

static void
mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
{
	wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM);

	for (;;) {
		usleep_range(100, 200);
		if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM))
			break;
	}

	/* configure RX_ROUTE_QM */
	wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
	wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
	wed_set(dev, MTK_WED_RTQM_GLO_CFG,
		FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
	wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
	/* enable RX_ROUTE_QM */
	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
}

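/* One-time buffer-manager setup: program the TX BM group/token ranges and
 * dynamic thresholds and, on v2 hardware, initialize the RX buffer
 * manager, the RRO queues and the RX route QM before unpausing the BM.
 */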
static void
mtk_wed_hw_init(struct mtk_wed_device *dev)
{
	if (dev->init_done)
		return;

	dev->init_done = true;
	mtk_wed_set_ext_int(dev, false);
	wed_w32(dev, MTK_WED_TX_BM_CTRL,
		MTK_WED_TX_BM_CTRL_PAUSE |
		FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
			   dev->tx_buf_ring.size / 128) |
		FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
			   MTK_WED_TX_RING_SIZE / 256));

	wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);

	wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);

	if (dev->hw->version == 1) {
		wed_w32(dev, MTK_WED_TX_BM_TKID,
			FIELD_PREP(MTK_WED_TX_BM_TKID_START,
				   dev->wlan.token_start) |
			FIELD_PREP(MTK_WED_TX_BM_TKID_END,
				   dev->wlan.token_start +
				   dev->wlan.nbuf - 1));
		wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
			FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
			MTK_WED_TX_BM_DYN_THR_HI);
	} else {
		wed_w32(dev, MTK_WED_TX_BM_TKID_V2,
			FIELD_PREP(MTK_WED_TX_BM_TKID_START,
				   dev->wlan.token_start) |
			FIELD_PREP(MTK_WED_TX_BM_TKID_END,
				   dev->wlan.token_start +
				   dev->wlan.nbuf - 1));
		wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
			FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
			MTK_WED_TX_BM_DYN_THR_HI_V2);
		wed_w32(dev, MTK_WED_TX_TKID_CTRL,
			MTK_WED_TX_TKID_CTRL_PAUSE |
			FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
				   dev->tx_buf_ring.size / 128) |
			FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
				   dev->tx_buf_ring.size / 128));
		wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
			FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
			MTK_WED_TX_TKID_DYN_THR_HI);
	}

	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);

	if (dev->hw->version == 1) {
		wed_set(dev, MTK_WED_CTRL,
			MTK_WED_CTRL_WED_TX_BM_EN |
			MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
	} else {
		wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
		/* rx hw init */
		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
			MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
			MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);

		mtk_wed_rx_buffer_hw_init(dev);
		mtk_wed_rro_hw_init(dev);
		mtk_wed_route_qm_hw_init(dev);
	}

	wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
}

static void
mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx)
{
	void *head = (void *)ring->desc;
	int i;

	for (i = 0; i < size; i++) {
		struct mtk_wdma_desc *desc;

		desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size);
		desc->buf0 = 0;
		if (tx)
			desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
		else
			desc->ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST);
		desc->buf1 = 0;
		desc->info = 0;
	}
}

static u32
mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	return !!(wed_r32(dev, reg) & mask);
}

static int
mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	int sleep = 15000;
	int timeout = 100 * sleep;
	u32 val;

	return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
				 timeout, false, dev, reg, mask);
}

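/* RX-side reset used by mtk_wed_reset_dma(): move the WO firmware into
 * SER reset state, stop and reset the WPDMA RX driver, RRO QM, route QM,
 * WDMA TX and WED RX DMA/BM blocks, switch the firmware back to the
 * enable state and finally clear the RX rings and free the RX buffers.
 */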
static int
mtk_wed_rx_reset(struct mtk_wed_device *dev)
{
	struct mtk_wed_wo *wo = dev->hw->wed_wo;
	u8 val = MTK_WED_WO_STATE_SER_RESET;
	int i, ret;

	ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
				   MTK_WED_WO_CMD_CHANGE_STATE, &val,
				   sizeof(val), true);
	if (ret)
		return ret;

	wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
	ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
				MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
	if (ret) {
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
	} else {
		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
			MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
			MTK_WED_WPDMA_RX_D_RST_DRV_IDX);

		wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
			MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
			MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
		wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
			MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
			MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);

		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
	}

	/* reset rro qm */
	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
	ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
				MTK_WED_CTRL_RX_RRO_QM_BUSY);
	if (ret) {
		mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM);
	} else {
		wed_set(dev, MTK_WED_RROQM_RST_IDX,
			MTK_WED_RROQM_RST_IDX_MIOD |
			MTK_WED_RROQM_RST_IDX_FDBK);
		wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
	}

	/* reset route qm */
	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
	ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
				MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
	if (ret)
		mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
	else
		wed_set(dev, MTK_WED_RTQM_GLO_CFG,
			MTK_WED_RTQM_Q_RST);

	/* reset tx wdma */
	mtk_wdma_tx_reset(dev);

	/* reset tx wdma drv */
	wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
	mtk_wed_poll_busy(dev, MTK_WED_CTRL,
			  MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
	mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);

	/* reset wed rx dma */
	ret = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
				MTK_WED_GLO_CFG_RX_DMA_BUSY);
	wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN);
	if (ret) {
		mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
	} else {
		struct mtk_eth *eth = dev->hw->eth;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
			wed_set(dev, MTK_WED_RESET_IDX,
				MTK_WED_RESET_IDX_RX_V2);
		else
			wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX);
		wed_w32(dev, MTK_WED_RESET_IDX, 0);
	}

	/* reset rx bm */
	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
	mtk_wed_poll_busy(dev, MTK_WED_CTRL,
			  MTK_WED_CTRL_WED_RX_BM_BUSY);
	mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);

	/* wo change to enable state */
	val = MTK_WED_WO_STATE_ENABLE;
	ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
				   MTK_WED_WO_CMD_CHANGE_STATE, &val,
				   sizeof(val), true);
	if (ret)
		return ret;

	/* wed_rx_ring_reset */
	for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) {
		if (!dev->rx_ring[i].desc)
			continue;

		mtk_wed_ring_reset(&dev->rx_ring[i], MTK_WED_RX_RING_SIZE,
				   false);
	}
	mtk_wed_free_rx_buffer(dev);

	return 0;
}

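/* Reset the WED TX datapath step by step (WED TX DMA, WDMA RX, the TX
 * free agent/buffer manager and the WPDMA drivers), falling back to a
 * hard block reset whenever a ring is still busy; on v2 hardware finish
 * with the RX-side reset above.
 */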
static void
mtk_wed_reset_dma(struct mtk_wed_device *dev)
{
	bool busy = false;
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
		if (!dev->tx_ring[i].desc)
			continue;

		mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE,
				   true);
	}

	/* 1. reset WED tx DMA */
	wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN);
	busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
				 MTK_WED_GLO_CFG_TX_DMA_BUSY);
	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
	} else {
		wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX);
		wed_w32(dev, MTK_WED_RESET_IDX, 0);
	}

	/* 2. reset WDMA rx DMA */
	busy = !!mtk_wdma_rx_reset(dev);
	wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
	if (!busy)
		busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
					 MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY);

	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
	} else {
		wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
			MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
		wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);

		wed_set(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);

		wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
	}

	/* 3. reset WED WPDMA tx */
	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

	for (i = 0; i < 100; i++) {
		val = wed_r32(dev, MTK_WED_TX_BM_INTF);
		if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
			break;
	}

	mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN);
	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);

	/* 4. reset WED WPDMA tx */
	busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
				 MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY);
	wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
	if (!busy)
		busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
					 MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY);

	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
	} else {
		wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
			MTK_WED_WPDMA_RESET_IDX_TX |
			MTK_WED_WPDMA_RESET_IDX_RX);
		wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
	}

	dev->init_done = false;
	if (dev->hw->version == 1)
		return;

	if (!busy) {
		wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_WPDMA_IDX_RX);
		wed_w32(dev, MTK_WED_RESET_IDX, 0);
	}

	mtk_wed_rx_reset(dev);
}

static int
mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
		   int size, u32 desc_size, bool tx)
{
	ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size,
					&ring->desc_phys, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_size = desc_size;
	ring->size = size;
	mtk_wed_ring_reset(ring, size, tx);

	return 0;
}

static int
mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
			   bool reset)
{
	u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
	struct mtk_wed_ring *wdma;

	if (idx >= ARRAY_SIZE(dev->rx_wdma))
		return -EINVAL;

	wdma = &dev->rx_wdma[idx];
	if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
					 desc_size, true))
		return -ENOMEM;

	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
		 wdma->desc_phys);
	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
		 size);
	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);

	wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
		wdma->desc_phys);
	wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
		size);

	return 0;
}

static int
mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
			   bool reset)
{
	u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
	struct mtk_wed_ring *wdma;

	if (idx >= ARRAY_SIZE(dev->tx_wdma))
		return -EINVAL;

	wdma = &dev->tx_wdma[idx];
	if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
					 desc_size, true))
		return -ENOMEM;

	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
		 wdma->desc_phys);
	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
		 size);
	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);

	if (reset)
		mtk_wed_ring_reset(wdma, MTK_WED_WDMA_RING_SIZE, true);

	if (!idx) {
		wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE,
			wdma->desc_phys);
		wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_COUNT,
			size);
		wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_CPU_IDX,
			0);
		wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_DMA_IDX,
			0);
	}

	return 0;
}

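/* Feed an offloaded RX packet back to the PPE when it carries the
 * HIT_UNBIND_RATE_REACHED CPU reason, so the corresponding flow table
 * entry can be looked up and bound.
 */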
static void
mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
		  u32 reason, u32 hash)
{
	struct mtk_eth *eth = dev->hw->eth;
	struct ethhdr *eh;

	if (!skb)
		return;

	if (reason != MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
		return;

	skb_set_mac_header(skb, 0);
	eh = eth_hdr(skb);
	skb->protocol = eh->h_proto;
	mtk_ppe_check_skb(eth->ppe[dev->hw->index], skb, hash);
}

static void
mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
{
	u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));

	/* wed control cr set */
	wed_set(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WED_TX_BM_EN |
		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

	if (dev->hw->version == 1) {
		wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
			MTK_WED_PCIE_INT_TRIGGER_STATUS);

		wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
			MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
			MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);

		wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
	} else {
		wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
					GENMASK(1, 0));
		/* initial tx interrupt trigger */
		wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
			MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
			MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
			MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN |
			MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG,
				   dev->wlan.tx_tbit[0]) |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
				   dev->wlan.tx_tbit[1]));

		/* initial txfree interrupt trigger */
		wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
			MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
			MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
				   dev->wlan.txfree_tbit));

		wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
			MTK_WED_WPDMA_INT_CTRL_RX0_EN |
			MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
			MTK_WED_WPDMA_INT_CTRL_RX1_EN |
			MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
				   dev->wlan.rx_tbit[0]) |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
				   dev->wlan.rx_tbit[1]));

		wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
		wed_set(dev, MTK_WED_WDMA_INT_CTRL,
			FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
				   dev->wdma_idx));
	}

	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);

	wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
	wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
	wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
}

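/* Turn on the WED, WPDMA and WDMA DMA engines; on v2 hardware also enable
 * the extra TX/RX driver bits, then poll until the WPDMA RX data rings
 * have been filled and set the RX DMA enable bit in the WLAN glo cfg.
 */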
static void
mtk_wed_dma_enable(struct mtk_wed_device *dev)
{
	wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);

	wed_set(dev, MTK_WED_GLO_CFG,
		MTK_WED_GLO_CFG_TX_DMA_EN |
		MTK_WED_GLO_CFG_RX_DMA_EN);
	wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
	wed_set(dev, MTK_WED_WDMA_GLO_CFG,
		MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);

	wdma_set(dev, MTK_WDMA_GLO_CFG,
		 MTK_WDMA_GLO_CFG_TX_DMA_EN |
		 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
		 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);

	if (dev->hw->version == 1) {
		wdma_set(dev, MTK_WDMA_GLO_CFG,
			 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
	} else {
		int i;

		wed_set(dev, MTK_WED_WPDMA_CTRL,
			MTK_WED_WPDMA_CTRL_SDL1_FIXED);

		wed_set(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
			MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);

		wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);

		wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
			MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
			MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);

		wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
			MTK_WED_WPDMA_RX_D_RX_DRV_EN |
			FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
			FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
				   0x2));

		for (i = 0; i < MTK_WED_RX_QUEUES; i++)
			mtk_wed_check_wfdma_rx_fill(dev, i);
	}
}

static void
mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
{
	int i;

	if (mtk_wed_get_rx_capa(dev) && mtk_wed_rx_buffer_alloc(dev))
		return;

	for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
		if (!dev->rx_wdma[i].desc)
			mtk_wed_wdma_rx_ring_setup(dev, i, 16, false);

	mtk_wed_hw_init(dev);
	mtk_wed_configure_irq(dev, irq_mask);

	mtk_wed_set_ext_int(dev, true);

	if (dev->hw->version == 1) {
		u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
			  FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
				     dev->hw->index);

		val |= BIT(0) | (BIT(1) * !!dev->hw->index);
		regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
	} else {
		/* driver sets mid ready and only once */
		wed_w32(dev, MTK_WED_EXT_INT_MASK1,
			MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
		wed_w32(dev, MTK_WED_EXT_INT_MASK2,
			MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);

		wed_r32(dev, MTK_WED_EXT_INT_MASK1);
		wed_r32(dev, MTK_WED_EXT_INT_MASK2);

		if (mtk_wed_rro_cfg(dev))
			return;
	}

	mtk_wed_set_512_support(dev, dev->wlan.wcid_512);

	mtk_wed_dma_enable(dev);
	dev->running = true;
}

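/* The .attach handler of mtk_wed_ops: claim a free WED instance for the
 * WLAN device, allocate the TX buffer pool (and the RRO state when RX
 * offload is supported), run the early hardware init and, on v2
 * hardware, boot the WO MCU.
 */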
static int
mtk_wed_attach(struct mtk_wed_device *dev)
	__releases(RCU)
{
	struct mtk_wed_hw *hw;
	struct device *device;
	int ret = 0;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "mtk_wed_attach without holding the RCU read lock");

	if ((dev->wlan.bus_type == MTK_WED_BUS_PCIE &&
	     pci_domain_nr(dev->wlan.pci_dev->bus) > 1) ||
	    !try_module_get(THIS_MODULE))
		ret = -ENODEV;

	rcu_read_unlock();

	if (ret)
		return ret;

	mutex_lock(&hw_lock);

	hw = mtk_wed_assign(dev);
	if (!hw) {
		module_put(THIS_MODULE);
		ret = -ENODEV;
		goto unlock;
	}

	device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
		 ? &dev->wlan.pci_dev->dev
		 : &dev->wlan.platform_dev->dev;
	dev_info(device, "attaching wed device %d version %d\n",
		 hw->index, hw->version);

	dev->hw = hw;
	dev->dev = hw->dev;
	dev->irq = hw->irq;
	dev->wdma_idx = hw->index;
	dev->version = hw->version;

	if (hw->eth->dma_dev == hw->eth->dev &&
	    of_dma_is_coherent(hw->eth->dev->of_node))
		mtk_eth_set_dma_device(hw->eth, hw->dev);

	ret = mtk_wed_tx_buffer_alloc(dev);
	if (ret)
		goto out;

	if (mtk_wed_get_rx_capa(dev)) {
		ret = mtk_wed_rro_alloc(dev);
		if (ret)
			goto out;
	}

	mtk_wed_hw_init_early(dev);
	if (hw->version == 1) {
		regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
				   BIT(hw->index), 0);
	} else {
		dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);
		ret = mtk_wed_wo_init(hw);
	}
out:
	if (ret) {
		dev_err(dev->hw->dev, "failed to attach wed device\n");
		__mtk_wed_detach(dev);
	}
unlock:
	mutex_unlock(&hw_lock);

	return ret;
}

static int
mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
		      bool reset)
{
	struct mtk_wed_ring *ring = &dev->tx_ring[idx];

	/*
	 * Tx ring redirection:
	 * Instead of configuring the WLAN PDMA TX ring directly, the WLAN
	 * driver allocated DMA ring gets configured into WED MTK_WED_RING_TX(n)
	 * registers.
	 *
	 * WED driver posts its own DMA ring as WLAN PDMA TX and configures it
	 * into MTK_WED_WPDMA_RING_TX(n) registers.
	 * It gets filled with packets picked up from WED TX ring and from
	 * WDMA RX.
	 */

	if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring)))
		return -EINVAL;

	if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
					 sizeof(*ring->desc), true))
		return -ENOMEM;

	if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE,
				       reset))
		return -ENOMEM;

	ring->reg_base = MTK_WED_RING_TX(idx);
	ring->wpdma = regs;

	/* WED -> WPDMA */
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);

	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
		ring->desc_phys);
	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
		MTK_WED_TX_RING_SIZE);
	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);

	return 0;
}

static int
mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
{
	struct mtk_wed_ring *ring = &dev->txfree_ring;
	int i, index = dev->hw->version == 1;

	/*
	 * For txfree event handling, the same DMA ring is shared between WED
	 * and WLAN. The WLAN driver accesses the ring index registers through
	 * WED.
	 */
	ring->reg_base = MTK_WED_RING_RX(index);
	ring->wpdma = regs;

	for (i = 0; i < 12; i += 4) {
		u32 val = readl(regs + i);

		wed_w32(dev, MTK_WED_RING_RX(index) + i, val);
		wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val);
	}

	return 0;
}

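/* Mirror a WLAN WPDMA RX data ring into WED: allocate (or reuse on reset)
 * the WED copy of the ring, set up the matching WDMA TX ring and program
 * both the WPDMA and the WED views of the ring registers.
 */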
static int
mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
		      bool reset)
{
	struct mtk_wed_ring *ring = &dev->rx_ring[idx];

	if (WARN_ON(idx >= ARRAY_SIZE(dev->rx_ring)))
		return -EINVAL;

	if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE,
					 sizeof(*ring->desc), false))
		return -ENOMEM;

	if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE,
				       reset))
		return -ENOMEM;

	ring->reg_base = MTK_WED_RING_RX_DATA(idx);
	ring->wpdma = regs;
	ring->flags |= MTK_WED_RING_CONFIGURED;

	/* WPDMA -> WED */
	wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
	wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE);

	wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE,
		ring->desc_phys);
	wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT,
		MTK_WED_RX_RING_SIZE);

	return 0;
}

static u32
mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
{
	u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;

	if (dev->hw->version == 1)
		ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
	else
		ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
			    MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
			    MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
			    MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;

	val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
	wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
	val &= ext_mask;
	if (!dev->hw->num_flows)
		val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
	if (val && net_ratelimit())
		pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);

	val = wed_r32(dev, MTK_WED_INT_STATUS);
	val &= mask;
	wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */

	return val;
}

static void
mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
{
	if (!dev->running)
		return;

	mtk_wed_set_ext_int(dev, !!mask);
	wed_w32(dev, MTK_WED_INT_MASK, mask);
}

int mtk_wed_flow_add(int index)
{
	struct mtk_wed_hw *hw = hw_list[index];
	int ret;

	if (!hw || !hw->wed_dev)
		return -ENODEV;

	if (hw->num_flows) {
		hw->num_flows++;
		return 0;
	}

	mutex_lock(&hw_lock);
	if (!hw->wed_dev) {
		ret = -ENODEV;
		goto out;
	}

	ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
	if (!ret)
		hw->num_flows++;
	mtk_wed_set_ext_int(hw->wed_dev, true);

out:
	mutex_unlock(&hw_lock);

	return ret;
}

void mtk_wed_flow_remove(int index)
{
	struct mtk_wed_hw *hw = hw_list[index];

	if (!hw)
		return;

	if (--hw->num_flows)
		return;

	mutex_lock(&hw_lock);
	if (!hw->wed_dev)
		goto out;

	hw->wed_dev->wlan.offload_disable(hw->wed_dev);
	mtk_wed_set_ext_int(hw->wed_dev, true);

out:
	mutex_unlock(&hw_lock);
}

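/* Called by the mtk_eth_soc driver for each WED device node it references:
 * look up the WED regmap, IRQ and (on v1) the pcie-mirror/hifsys syscons,
 * register the mtk_wed_ops dispatch table and publish the instance in
 * hw_list[].
 */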
void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
		    void __iomem *wdma, phys_addr_t wdma_phy,
		    int index)
{
	static const struct mtk_wed_ops wed_ops = {
		.attach = mtk_wed_attach,
		.tx_ring_setup = mtk_wed_tx_ring_setup,
		.rx_ring_setup = mtk_wed_rx_ring_setup,
		.txfree_ring_setup = mtk_wed_txfree_ring_setup,
		.msg_update = mtk_wed_mcu_msg_update,
		.start = mtk_wed_start,
		.stop = mtk_wed_stop,
		.reset_dma = mtk_wed_reset_dma,
		.reg_read = wed_r32,
		.reg_write = wed_w32,
		.irq_get = mtk_wed_irq_get,
		.irq_set_mask = mtk_wed_irq_set_mask,
		.detach = mtk_wed_detach,
		.ppe_check = mtk_wed_ppe_check,
	};
	struct device_node *eth_np = eth->dev->of_node;
	struct platform_device *pdev;
	struct mtk_wed_hw *hw;
	struct regmap *regs;
	int irq;

	if (!np)
		return;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		goto err_of_node_put;

	get_device(&pdev->dev);
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		goto err_put_device;

	regs = syscon_regmap_lookup_by_phandle(np, NULL);
	if (IS_ERR(regs))
		goto err_put_device;

	rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);

	mutex_lock(&hw_lock);

	if (WARN_ON(hw_list[index]))
		goto unlock;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		goto unlock;

	hw->node = np;
	hw->regs = regs;
	hw->eth = eth;
	hw->dev = &pdev->dev;
	hw->wdma_phy = wdma_phy;
	hw->wdma = wdma;
	hw->index = index;
	hw->irq = irq;
	hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;

	if (hw->version == 1) {
		hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
				"mediatek,pcie-mirror");
		hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
				"mediatek,hifsys");
		if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
			kfree(hw);
			goto unlock;
		}

		if (!index) {
			regmap_write(hw->mirror, 0, 0);
			regmap_write(hw->mirror, 4, 0);
		}
	}

	mtk_wed_hw_add_debugfs(hw);

	hw_list[index] = hw;

	mutex_unlock(&hw_lock);

	return;

unlock:
	mutex_unlock(&hw_lock);
err_put_device:
	put_device(&pdev->dev);
err_of_node_put:
	of_node_put(np);
}

void mtk_wed_exit(void)
{
	int i;

	rcu_assign_pointer(mtk_soc_wed_ops, NULL);

	synchronize_rcu();

	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
		struct mtk_wed_hw *hw;

		hw = hw_list[i];
		if (!hw)
			continue;

		hw_list[i] = NULL;
		debugfs_remove(hw->debugfs_dir);
		put_device(hw->dev);
		of_node_put(hw->node);
		kfree(hw);
	}
}