// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/mfd/syscon.h>
#include <linux/debugfs.h>
#include <linux/soc/mediatek/mtk_wed.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include "mtk_eth_soc.h"
#include "mtk_wed_regs.h"
#include "mtk_wed.h"
#include "mtk_ppe.h"
#include "mtk_wed_wo.h"

#define MTK_PCIE_BASE(n)		(0x1a143000 + (n) * 0x2000)

#define MTK_WED_PKT_SIZE		1900
#define MTK_WED_BUF_SIZE		2048
#define MTK_WED_BUF_PER_PAGE		(PAGE_SIZE / 2048)
#define MTK_WED_RX_RING_SIZE		1536

#define MTK_WED_TX_RING_SIZE		2048
#define MTK_WED_WDMA_RING_SIZE		1024
#define MTK_WED_MAX_GROUP_SIZE		0x100
#define MTK_WED_VLD_GROUP_SIZE		0x40
#define MTK_WED_PER_GROUP_PKT		128

#define MTK_WED_FBUF_SIZE		128
#define MTK_WED_MIOD_CNT		16
#define MTK_WED_FB_CMD_CNT		1024
#define MTK_WED_RRO_QUE_CNT		8192
#define MTK_WED_MIOD_ENTRY_CNT		128

static struct mtk_wed_hw *hw_list[2];
static DEFINE_MUTEX(hw_lock);

struct mtk_wed_flow_block_priv {
	struct mtk_wed_hw *hw;
	struct net_device *dev;
};

static void
wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
	regmap_update_bits(dev->hw->regs, reg, mask | val, val);
}

static void
wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	return wed_m32(dev, reg, 0, mask);
}

static void
wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	return wed_m32(dev, reg, mask, 0);
}

static void
wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
	wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
}

static void
wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	wdma_m32(dev, reg, 0, mask);
}

static void
wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	wdma_m32(dev, reg, mask, 0);
}

static u32
wifi_r32(struct mtk_wed_device *dev, u32 reg)
{
	return readl(dev->wlan.base + reg);
}

static void
wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
{
	writel(val, dev->wlan.base + reg);
}

static u32
mtk_wed_read_reset(struct mtk_wed_device *dev)
{
	return wed_r32(dev, MTK_WED_RESET);
}

static u32
mtk_wdma_read_reset(struct mtk_wed_device *dev)
{
	return wdma_r32(dev, MTK_WDMA_GLO_CFG);
}

static int
mtk_wdma_rx_reset(struct mtk_wed_device *dev)
{
	u32 status, mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY;
	int i, ret;

	wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN);
	ret = readx_poll_timeout(mtk_wdma_read_reset, dev, status,
				 !(status & mask), 0, 10000);
	if (ret)
		dev_err(dev->hw->dev, "rx reset failed\n");

	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);

	for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) {
		if (dev->rx_wdma[i].desc)
			continue;

		wdma_w32(dev,
			 MTK_WDMA_RING_RX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
	}

	return ret;
}

static void
mtk_wdma_tx_reset(struct mtk_wed_device *dev)
{
	u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY;
	int i;

	wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
	if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
			       !(status & mask), 0, 10000))
		dev_err(dev->hw->dev, "tx reset failed\n");

	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);

	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
		wdma_w32(dev,
			 MTK_WDMA_RING_TX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
}

static void
mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
{
	u32 status;

	wed_w32(dev, MTK_WED_RESET, mask);
	if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
			       !(status & mask), 0, 1000))
		WARN_ON_ONCE(1);
}

static u32
mtk_wed_wo_read_status(struct mtk_wed_device *dev)
{
	return wed_r32(dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_WO_STATUS);
}

static void
mtk_wed_wo_reset(struct mtk_wed_device *dev)
{
	struct mtk_wed_wo *wo = dev->hw->wed_wo;
	u8 state = MTK_WED_WO_STATE_DISABLE;
	void __iomem *reg;
	u32 val;

	mtk_wdma_tx_reset(dev);
	mtk_wed_reset(dev, MTK_WED_RESET_WED);

	if (mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
				 MTK_WED_WO_CMD_CHANGE_STATE, &state,
				 sizeof(state), false))
		return;

	if (readx_poll_timeout(mtk_wed_wo_read_status, dev, val,
			       val == MTK_WED_WOIF_DISABLE_DONE,
			       100, MTK_WOCPU_TIMEOUT))
		dev_err(dev->hw->dev, "failed to disable wed-wo\n");

	reg = ioremap(MTK_WED_WO_CPU_MCUSYS_RESET_ADDR, 4);

	val = readl(reg);
	switch (dev->hw->index) {
	case 0:
		val |= MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK;
		writel(val, reg);
		val &= ~MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK;
		writel(val, reg);
		break;
	case 1:
		val |= MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK;
		writel(val, reg);
		val &= ~MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK;
		writel(val, reg);
		break;
	default:
		break;
	}
	iounmap(reg);
}

void mtk_wed_fe_reset(void)
{
	int i;

	mutex_lock(&hw_lock);

	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
		struct mtk_wed_hw *hw = hw_list[i];
		struct mtk_wed_device *dev;
		int err;

		/* hw_list entries are only populated for probed WED instances */
		if (!hw)
			continue;

		dev = hw->wed_dev;
		if (!dev || !dev->wlan.reset)
			continue;

		/* reset callback blocks until WLAN reset is completed */
		err = dev->wlan.reset(dev);
		if (err)
			dev_err(dev->dev, "wlan reset failed: %d\n", err);
	}

	mutex_unlock(&hw_lock);
}

void mtk_wed_fe_reset_complete(void)
{
	int i;

	mutex_lock(&hw_lock);

	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
		struct mtk_wed_hw *hw = hw_list[i];
		struct mtk_wed_device *dev;

		if (!hw)
			continue;

		dev = hw->wed_dev;
		if (!dev || !dev->wlan.reset_complete)
			continue;

		dev->wlan.reset_complete(dev);
	}

	mutex_unlock(&hw_lock);
}

static struct mtk_wed_hw *
mtk_wed_assign(struct mtk_wed_device *dev)
{
	struct mtk_wed_hw *hw;
	int i;

	if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
		hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
		if (!hw)
			return NULL;

		if (!hw->wed_dev)
			goto out;

		if (hw->version == 1)
			return NULL;

		/* MT7986 WED devices do not have any pcie slot restrictions */
	}
	/* MT7986 PCIE or AXI */
	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
		hw = hw_list[i];
		if (hw && !hw->wed_dev)
			goto out;
	}

	return NULL;

out:
	hw->wed_dev = dev;
	return hw;
}

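/* Allocate the WED TX buffer pool shared with the WLAN driver: whole pages
 * are DMA-mapped and carved into MTK_WED_BUF_SIZE buffers, and one WDMA
 * descriptor per buffer is pre-filled via the wlan.init_buf() callback.
 */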
static int
mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
{
	struct mtk_wdma_desc *desc;
	dma_addr_t desc_phys;
	void **page_list;
	int token = dev->wlan.token_start;
	int ring_size;
	int n_pages;
	int i, page_idx;

	ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
	n_pages = ring_size / MTK_WED_BUF_PER_PAGE;

	page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	dev->tx_buf_ring.size = ring_size;
	dev->tx_buf_ring.pages = page_list;

	desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
				  &desc_phys, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	dev->tx_buf_ring.desc = desc;
	dev->tx_buf_ring.desc_phys = desc_phys;

	for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
		dma_addr_t page_phys, buf_phys;
		struct page *page;
		void *buf;
		int s;

		page = __dev_alloc_pages(GFP_KERNEL, 0);
		if (!page)
			return -ENOMEM;

		page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev->hw->dev, page_phys)) {
			__free_page(page);
			return -ENOMEM;
		}

		page_list[page_idx++] = page;
		dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
					DMA_BIDIRECTIONAL);

		buf = page_to_virt(page);
		buf_phys = page_phys;

		for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
			u32 txd_size;
			u32 ctrl;

			txd_size = dev->wlan.init_buf(buf, buf_phys, token++);

			desc->buf0 = cpu_to_le32(buf_phys);
			desc->buf1 = cpu_to_le32(buf_phys + txd_size);

			if (dev->hw->version == 1)
				ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
				       FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
						  MTK_WED_BUF_SIZE - txd_size) |
				       MTK_WDMA_DESC_CTRL_LAST_SEG1;
			else
				ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
				       FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
						  MTK_WED_BUF_SIZE - txd_size) |
				       MTK_WDMA_DESC_CTRL_LAST_SEG0;
			desc->ctrl = cpu_to_le32(ctrl);
			desc->info = 0;
			desc++;

			buf += MTK_WED_BUF_SIZE;
			buf_phys += MTK_WED_BUF_SIZE;
		}

		dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
					   DMA_BIDIRECTIONAL);
	}

	return 0;
}

static void
mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
{
	struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc;
	void **page_list = dev->tx_buf_ring.pages;
	int page_idx;
	int i;

	if (!page_list)
		return;

	if (!desc)
		goto free_pagelist;

	for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size;
	     i += MTK_WED_BUF_PER_PAGE) {
		void *page = page_list[page_idx++];
		dma_addr_t buf_addr;

		if (!page)
			break;

		buf_addr = le32_to_cpu(desc[i].buf0);
		dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(page);
	}

	dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc),
			  desc, dev->tx_buf_ring.desc_phys);

free_pagelist:
	kfree(page_list);
}

static int
mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
{
	struct mtk_rxbm_desc *desc;
	dma_addr_t desc_phys;

	dev->rx_buf_ring.size = dev->wlan.rx_nbuf;
	desc = dma_alloc_coherent(dev->hw->dev,
				  dev->wlan.rx_nbuf * sizeof(*desc),
				  &desc_phys, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	dev->rx_buf_ring.desc = desc;
	dev->rx_buf_ring.desc_phys = desc_phys;
	dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt);

	return 0;
}

static void
mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
{
	struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;

	if (!desc)
		return;

	dev->wlan.release_rx_buf(dev);
	dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc),
			  desc, dev->rx_buf_ring.desc_phys);
}

static void
mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev)
{
	wed_w32(dev, MTK_WED_RX_BM_RX_DMAD,
		FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_size));
	wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys);
	wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL |
		FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_npkt));
	wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
		FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
}

static void
mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
{
	if (!ring->desc)
		return;

	dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size,
			  ring->desc, ring->desc_phys);
}

static void
mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
{
	mtk_wed_free_rx_buffer(dev);
	mtk_wed_free_ring(dev, &dev->rro.ring);
}

static void
mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
		mtk_wed_free_ring(dev, &dev->tx_ring[i]);
	for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
		mtk_wed_free_ring(dev, &dev->rx_wdma[i]);
}

static void
mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
{
	u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;

	if (dev->hw->version == 1)
		mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
	else
		mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
			MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
			MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
			MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;

	if (!dev->hw->num_flows)
		mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;

	wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
	wed_r32(dev, MTK_WED_EXT_INT_MASK);
}

static void
mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
{
	if (enable) {
		wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
		wed_w32(dev, MTK_WED_TXP_DW1,
			FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
	} else {
		wed_w32(dev, MTK_WED_TXP_DW1,
			FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
		wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
	}
}

#define MTK_WFMDA_RX_DMA_EN	BIT(2)
static void
mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
{
	u32 val;
	int i;

	if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED))
		return; /* queue is not configured by mt76 */

	for (i = 0; i < 3; i++) {
		u32 cur_idx;

		cur_idx = wed_r32(dev,
				  MTK_WED_WPDMA_RING_RX_DATA(idx) +
				  MTK_WED_RING_OFS_CPU_IDX);
		if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
			break;

		usleep_range(100000, 200000);
	}

	if (i == 3) {
		dev_err(dev->hw->dev, "rx dma enable failed\n");
		return;
	}

	val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) |
	      MTK_WFMDA_RX_DMA_EN;
	wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val);
}

static void
mtk_wed_dma_disable(struct mtk_wed_device *dev)
{
	wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);

	wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);

	wed_clr(dev, MTK_WED_GLO_CFG,
		MTK_WED_GLO_CFG_TX_DMA_EN |
		MTK_WED_GLO_CFG_RX_DMA_EN);

	wdma_clr(dev, MTK_WDMA_GLO_CFG,
		 MTK_WDMA_GLO_CFG_TX_DMA_EN |
		 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
		 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);

	if (dev->hw->version == 1) {
		regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
		wdma_clr(dev, MTK_WDMA_GLO_CFG,
			 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
	} else {
		wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);

		wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
			MTK_WED_WPDMA_RX_D_RX_DRV_EN);
		wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
	}

	mtk_wed_set_512_support(dev, false);
}

static void
mtk_wed_stop(struct mtk_wed_device *dev)
{
	mtk_wed_set_ext_int(dev, false);

	wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
	wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
	wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);

	if (dev->hw->version == 1)
		return;

	wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
	wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
}

static void
mtk_wed_deinit(struct mtk_wed_device *dev)
{
	mtk_wed_stop(dev);
	mtk_wed_dma_disable(dev);

	wed_clr(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WED_TX_BM_EN |
		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

	if (dev->hw->version == 1)
		return;

	wed_clr(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_RX_ROUTE_QM_EN |
		MTK_WED_CTRL_WED_RX_BM_EN |
		MTK_WED_CTRL_RX_RRO_QM_EN);
}

static void
__mtk_wed_detach(struct mtk_wed_device *dev)
{
	struct mtk_wed_hw *hw = dev->hw;

	mtk_wed_deinit(dev);

	mtk_wdma_rx_reset(dev);
	mtk_wed_reset(dev, MTK_WED_RESET_WED);
	mtk_wed_free_tx_buffer(dev);
	mtk_wed_free_tx_rings(dev);

	if (mtk_wed_get_rx_capa(dev)) {
		if (hw->wed_wo)
			mtk_wed_wo_reset(dev);
		mtk_wed_free_rx_rings(dev);
		if (hw->wed_wo)
			mtk_wed_wo_deinit(hw);
	}

	if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
		struct device_node *wlan_node;

		wlan_node = dev->wlan.pci_dev->dev.of_node;
		if (of_dma_is_coherent(wlan_node) && hw->hifsys)
			regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
					   BIT(hw->index), BIT(hw->index));
	}

	if ((!hw_list[!hw->index] || !hw_list[!hw->index]->wed_dev) &&
	    hw->eth->dma_dev != hw->eth->dev)
		mtk_eth_set_dma_device(hw->eth, hw->eth->dev);

	memset(dev, 0, sizeof(*dev));
	module_put(THIS_MODULE);

	hw->wed_dev = NULL;
}

static void
mtk_wed_detach(struct mtk_wed_device *dev)
{
	mutex_lock(&hw_lock);
	__mtk_wed_detach(dev);
	mutex_unlock(&hw_lock);
}

#define PCIE_BASE_ADDR0		0x11280000
static void
mtk_wed_bus_init(struct mtk_wed_device *dev)
{
	switch (dev->wlan.bus_type) {
	case MTK_WED_BUS_PCIE: {
		struct device_node *np = dev->hw->eth->dev->of_node;
		struct regmap *regs;

		regs = syscon_regmap_lookup_by_phandle(np,
						       "mediatek,wed-pcie");
		if (IS_ERR(regs))
			break;

		regmap_update_bits(regs, 0, BIT(0), BIT(0));

		wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
			FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));

		/* pcie interrupt control: pola/source selection */
		wed_set(dev, MTK_WED_PCIE_INT_CTRL,
			MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
			FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
		wed_r32(dev, MTK_WED_PCIE_INT_CTRL);

		wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
		wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);

		/* pcie interrupt status trigger register */
		wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
		wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);

		/* pola setting */
		wed_set(dev, MTK_WED_PCIE_INT_CTRL,
			MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
		break;
	}
	case MTK_WED_BUS_AXI:
		wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
			MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
		break;
	default:
		break;
	}
}

static void
mtk_wed_set_wpdma(struct mtk_wed_device *dev)
{
	if (dev->hw->version == 1) {
		wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
	} else {
		mtk_wed_bus_init(dev);

		wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
		wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
		wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
		wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
		wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
		wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
	}
}

static void
mtk_wed_hw_init_early(struct mtk_wed_device *dev)
{
	u32 mask, set;

	mtk_wed_deinit(dev);
	mtk_wed_reset(dev, MTK_WED_RESET_WED);
	mtk_wed_set_wpdma(dev);

	mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
	       MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
	       MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
	set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
	      MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
	      MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
	wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);

	if (dev->hw->version == 1) {
		u32 offset = dev->hw->index ? 0x04000400 : 0;

		wdma_set(dev, MTK_WDMA_GLO_CFG,
			 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
			 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
			 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);

		wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
		wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
		wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
			MTK_PCIE_BASE(dev->hw->index));
	} else {
		wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy);
		wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT);
		wed_w32(dev, MTK_WED_WDMA_OFFSET0,
			FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS,
				   MTK_WDMA_INT_STATUS) |
			FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG,
				   MTK_WDMA_GLO_CFG));

		wed_w32(dev, MTK_WED_WDMA_OFFSET1,
			FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL,
				   MTK_WDMA_RING_TX(0)) |
			FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
				   MTK_WDMA_RING_RX(0)));
	}
}

static int
mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
		       int size)
{
	ring->desc = dma_alloc_coherent(dev->hw->dev,
					size * sizeof(*ring->desc),
					&ring->desc_phys, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_size = sizeof(*ring->desc);
	ring->size = size;

	return 0;
}

#define MTK_WED_MIOD_COUNT	(MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT)
static int
mtk_wed_rro_alloc(struct mtk_wed_device *dev)
{
	struct reserved_mem *rmem;
	struct device_node *np;
	int index;

	index = of_property_match_string(dev->hw->node, "memory-region-names",
					 "wo-dlm");
	if (index < 0)
		return index;

	np = of_parse_phandle(dev->hw->node, "memory-region", index);
	if (!np)
		return -ENODEV;

	rmem = of_reserved_mem_lookup(np);
	of_node_put(np);

	if (!rmem)
		return -ENODEV;

	dev->rro.miod_phys = rmem->base;
	dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys;

	return mtk_wed_rro_ring_alloc(dev, &dev->rro.ring,
				      MTK_WED_RRO_QUE_CNT);
}

static int
mtk_wed_rro_cfg(struct mtk_wed_device *dev)
{
	struct mtk_wed_wo *wo = dev->hw->wed_wo;
	struct {
		struct {
			__le32 base;
			__le32 cnt;
			__le32 unit;
		} ring[2];
		__le32 wed;
		u8 version;
	} req = {
		.ring[0] = {
			.base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE),
			.cnt = cpu_to_le32(MTK_WED_MIOD_CNT),
			.unit = cpu_to_le32(MTK_WED_MIOD_ENTRY_CNT),
		},
		.ring[1] = {
			.base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE +
					    MTK_WED_MIOD_COUNT),
			.cnt = cpu_to_le32(MTK_WED_FB_CMD_CNT),
			.unit = cpu_to_le32(4),
		},
	};

	return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
				    MTK_WED_WO_CMD_WED_CFG,
				    &req, sizeof(req), true);
}

static void
mtk_wed_rro_hw_init(struct mtk_wed_device *dev)
{
	wed_w32(dev, MTK_WED_RROQM_MIOD_CFG,
		FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) |
		FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) |
		FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW,
			   MTK_WED_MIOD_ENTRY_CNT >> 2));

	wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_phys);
	wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1,
		FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT));
	wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_phys);
	wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1,
		FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT));
	wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0);
	wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.ring.desc_phys);

	wed_set(dev, MTK_WED_RROQM_RST_IDX,
		MTK_WED_RROQM_RST_IDX_MIOD |
		MTK_WED_RROQM_RST_IDX_FDBK);

	wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
	wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT - 1);
	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
}

static void
mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
{
	wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM);

	for (;;) {
		usleep_range(100, 200);
		if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM))
			break;
	}

	/* configure RX_ROUTE_QM */
	wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
	wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
	wed_set(dev, MTK_WED_RTQM_GLO_CFG,
		FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
	wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
	/* enable RX_ROUTE_QM */
	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
}

static void
mtk_wed_hw_init(struct mtk_wed_device *dev)
{
	if (dev->init_done)
		return;

	dev->init_done = true;
	mtk_wed_set_ext_int(dev, false);
	wed_w32(dev, MTK_WED_TX_BM_CTRL,
		MTK_WED_TX_BM_CTRL_PAUSE |
		FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
			   dev->tx_buf_ring.size / 128) |
		FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
			   MTK_WED_TX_RING_SIZE / 256));

	wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);

	wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);

	if (dev->hw->version == 1) {
		wed_w32(dev, MTK_WED_TX_BM_TKID,
			FIELD_PREP(MTK_WED_TX_BM_TKID_START,
				   dev->wlan.token_start) |
			FIELD_PREP(MTK_WED_TX_BM_TKID_END,
				   dev->wlan.token_start +
				   dev->wlan.nbuf - 1));
		wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
			FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
			MTK_WED_TX_BM_DYN_THR_HI);
	} else {
		wed_w32(dev, MTK_WED_TX_BM_TKID_V2,
			FIELD_PREP(MTK_WED_TX_BM_TKID_START,
				   dev->wlan.token_start) |
			FIELD_PREP(MTK_WED_TX_BM_TKID_END,
				   dev->wlan.token_start +
				   dev->wlan.nbuf - 1));
		wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
			FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
			MTK_WED_TX_BM_DYN_THR_HI_V2);
		wed_w32(dev, MTK_WED_TX_TKID_CTRL,
			MTK_WED_TX_TKID_CTRL_PAUSE |
			FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
				   dev->tx_buf_ring.size / 128) |
			FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
				   dev->tx_buf_ring.size / 128));
		wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
			FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
			MTK_WED_TX_TKID_DYN_THR_HI);
	}

	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);

	if (dev->hw->version == 1) {
		wed_set(dev, MTK_WED_CTRL,
			MTK_WED_CTRL_WED_TX_BM_EN |
			MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
	} else {
		wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
		/* rx hw init */
		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
			MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
			MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);

		mtk_wed_rx_buffer_hw_init(dev);
		mtk_wed_rro_hw_init(dev);
		mtk_wed_route_qm_hw_init(dev);
	}

	wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
}

static void
mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx)
{
	void *head = (void *)ring->desc;
	int i;

	for (i = 0; i < size; i++) {
		struct mtk_wdma_desc *desc;

		desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size);
		desc->buf0 = 0;
		if (tx)
			desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
		else
			desc->ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST);
		desc->buf1 = 0;
		desc->info = 0;
	}
}

static u32
mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	return !!(wed_r32(dev, reg) & mask);
}

static int
mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	int sleep = 15000;
	int timeout = 100 * sleep;
	u32 val;

	return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
				 timeout, false, dev, reg, mask);
}

static int
mtk_wed_rx_reset(struct mtk_wed_device *dev)
{
	struct mtk_wed_wo *wo = dev->hw->wed_wo;
	u8 val = MTK_WED_WO_STATE_SER_RESET;
	int i, ret;

	ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
				   MTK_WED_WO_CMD_CHANGE_STATE, &val,
				   sizeof(val), true);
	if (ret)
		return ret;

	wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
	ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
				MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
	if (ret) {
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
	} else {
		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
			MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
			MTK_WED_WPDMA_RX_D_RST_DRV_IDX);

		wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
			MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
			MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
		wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
			MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
			MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);

		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
	}

	/* reset rro qm */
	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
	ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
				MTK_WED_CTRL_RX_RRO_QM_BUSY);
	if (ret) {
		mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM);
	} else {
		wed_set(dev, MTK_WED_RROQM_RST_IDX,
			MTK_WED_RROQM_RST_IDX_MIOD |
			MTK_WED_RROQM_RST_IDX_FDBK);
		wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
	}

	/* reset route qm */
	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
	ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
				MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
	if (ret)
		mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
	else
		wed_set(dev, MTK_WED_RTQM_GLO_CFG,
			MTK_WED_RTQM_Q_RST);

	/* reset tx wdma */
	mtk_wdma_tx_reset(dev);

	/* reset tx wdma drv */
	wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
	mtk_wed_poll_busy(dev, MTK_WED_CTRL,
			  MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
	mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);

	/* reset wed rx dma */
	ret = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
				MTK_WED_GLO_CFG_RX_DMA_BUSY);
	wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN);
	if (ret) {
		mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
	} else {
		struct mtk_eth *eth = dev->hw->eth;

		if (mtk_is_netsys_v2_or_greater(eth))
			wed_set(dev, MTK_WED_RESET_IDX,
				MTK_WED_RESET_IDX_RX_V2);
		else
			wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX);
		wed_w32(dev, MTK_WED_RESET_IDX, 0);
	}

	/* reset rx bm */
	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
	mtk_wed_poll_busy(dev, MTK_WED_CTRL,
			  MTK_WED_CTRL_WED_RX_BM_BUSY);
	mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);

	/* wo change to enable state */
	val = MTK_WED_WO_STATE_ENABLE;
	ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
				   MTK_WED_WO_CMD_CHANGE_STATE, &val,
				   sizeof(val), true);
	if (ret)
		return ret;

	/* wed_rx_ring_reset */
	for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) {
		if (!dev->rx_ring[i].desc)
			continue;

		mtk_wed_ring_reset(&dev->rx_ring[i], MTK_WED_RX_RING_SIZE,
				   false);
	}
	mtk_wed_free_rx_buffer(dev);

	return 0;
}

static void
mtk_wed_reset_dma(struct mtk_wed_device *dev)
{
	bool busy = false;
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
		if (!dev->tx_ring[i].desc)
			continue;

		mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE,
				   true);
	}

	/* 1. reset WED tx DMA */
	wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN);
	busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
				 MTK_WED_GLO_CFG_TX_DMA_BUSY);
	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
	} else {
		wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX);
		wed_w32(dev, MTK_WED_RESET_IDX, 0);
	}

	/* 2. reset WDMA rx DMA */
	busy = !!mtk_wdma_rx_reset(dev);
	wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
	if (!busy)
		busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
					 MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY);

	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
	} else {
		wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
			MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
		wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);

		wed_set(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);

		wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
	}

	/* 3. reset WED WPDMA tx */
	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

	for (i = 0; i < 100; i++) {
		val = wed_r32(dev, MTK_WED_TX_BM_INTF);
		if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
			break;
	}

	mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN);
	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);

	/* 4. reset WED WPDMA tx */
	busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
				 MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY);
	wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
	if (!busy)
		busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
					 MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY);

	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
	} else {
		wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
			MTK_WED_WPDMA_RESET_IDX_TX |
			MTK_WED_WPDMA_RESET_IDX_RX);
		wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
	}

	dev->init_done = false;
	if (dev->hw->version == 1)
		return;

	if (!busy) {
		wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_WPDMA_IDX_RX);
		wed_w32(dev, MTK_WED_RESET_IDX, 0);
	}

	mtk_wed_rx_reset(dev);
}

static int
mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
		   int size, u32 desc_size, bool tx)
{
	ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size,
					&ring->desc_phys, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_size = desc_size;
	ring->size = size;
	mtk_wed_ring_reset(ring, size, tx);

	return 0;
}

static int
mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
			   bool reset)
{
	u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
	struct mtk_wed_ring *wdma;

	if (idx >= ARRAY_SIZE(dev->rx_wdma))
		return -EINVAL;

	wdma = &dev->rx_wdma[idx];
	if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
					 desc_size, true))
		return -ENOMEM;

	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
		 wdma->desc_phys);
	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
		 size);
	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);

	wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
		wdma->desc_phys);
	wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
		size);

	return 0;
}

static int
mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
			   bool reset)
{
	u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
	struct mtk_wed_ring *wdma;

	if (idx >= ARRAY_SIZE(dev->tx_wdma))
		return -EINVAL;

	wdma = &dev->tx_wdma[idx];
	if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
					 desc_size, true))
		return -ENOMEM;

	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
		 wdma->desc_phys);
	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
		 size);
	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
	wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);

	if (reset)
		mtk_wed_ring_reset(wdma, MTK_WED_WDMA_RING_SIZE, true);

	if (!idx) {
		wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE,
			wdma->desc_phys);
		wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_COUNT,
			size);
		wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_CPU_IDX,
			0);
		wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_DMA_IDX,
			0);
	}

	return 0;
}

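/* ppe_check handler exposed through wed_ops: packets flagged with the
 * HIT_UNBIND_RATE_REACHED PPE reason are handed to mtk_ppe_check_skb()
 * so the corresponding flow entry can be bound for hardware offload.
 */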
static void
mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
		  u32 reason, u32 hash)
{
	struct mtk_eth *eth = dev->hw->eth;
	struct ethhdr *eh;

	if (!skb)
		return;

	if (reason != MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
		return;

	skb_set_mac_header(skb, 0);
	eh = eth_hdr(skb);
	skb->protocol = eh->h_proto;
	mtk_ppe_check_skb(eth->ppe[dev->hw->index], skb, hash);
}

static void
mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
{
	u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));

	/* wed control cr set */
	wed_set(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WED_TX_BM_EN |
		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

	if (dev->hw->version == 1) {
		wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
			MTK_WED_PCIE_INT_TRIGGER_STATUS);

		wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
			MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
			MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);

		wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
	} else {
		wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
					GENMASK(1, 0));
		/* initial tx interrupt trigger */
		wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
			MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
			MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
			MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN |
			MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG,
				   dev->wlan.tx_tbit[0]) |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
				   dev->wlan.tx_tbit[1]));

		/* initial txfree interrupt trigger */
		wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
			MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
			MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
				   dev->wlan.txfree_tbit));

		wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
			MTK_WED_WPDMA_INT_CTRL_RX0_EN |
			MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
			MTK_WED_WPDMA_INT_CTRL_RX1_EN |
			MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
				   dev->wlan.rx_tbit[0]) |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
				   dev->wlan.rx_tbit[1]));

		wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
		wed_set(dev, MTK_WED_WDMA_INT_CTRL,
			FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
				   dev->wdma_idx));
	}

	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);

	wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
	wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
	wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
}

static void
mtk_wed_dma_enable(struct mtk_wed_device *dev)
{
	wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);

	wed_set(dev, MTK_WED_GLO_CFG,
		MTK_WED_GLO_CFG_TX_DMA_EN |
		MTK_WED_GLO_CFG_RX_DMA_EN);
	wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
	wed_set(dev, MTK_WED_WDMA_GLO_CFG,
		MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);

	wdma_set(dev, MTK_WDMA_GLO_CFG,
		 MTK_WDMA_GLO_CFG_TX_DMA_EN |
		 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
		 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);

	if (dev->hw->version == 1) {
		wdma_set(dev, MTK_WDMA_GLO_CFG,
			 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
	} else {
		int i;

		wed_set(dev, MTK_WED_WPDMA_CTRL,
			MTK_WED_WPDMA_CTRL_SDL1_FIXED);

		wed_set(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
			MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);

		wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);

		wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
			MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
			MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);

		wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
			MTK_WED_WPDMA_RX_D_RX_DRV_EN |
			FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
			FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
				   0x2));

		for (i = 0; i < MTK_WED_RX_QUEUES; i++)
			mtk_wed_check_wfdma_rx_fill(dev, i);
	}
}

static void
mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
{
	int i;

	if (mtk_wed_get_rx_capa(dev) && mtk_wed_rx_buffer_alloc(dev))
		return;

	for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
		if (!dev->rx_wdma[i].desc)
			mtk_wed_wdma_rx_ring_setup(dev, i, 16, false);

	mtk_wed_hw_init(dev);
	mtk_wed_configure_irq(dev, irq_mask);

	mtk_wed_set_ext_int(dev, true);

	if (dev->hw->version == 1) {
		u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
			  FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
				     dev->hw->index);

		val |= BIT(0) | (BIT(1) * !!dev->hw->index);
		regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
	} else {
		/* driver set mid ready and only once */
		wed_w32(dev, MTK_WED_EXT_INT_MASK1,
			MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
		wed_w32(dev, MTK_WED_EXT_INT_MASK2,
			MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);

		wed_r32(dev, MTK_WED_EXT_INT_MASK1);
		wed_r32(dev, MTK_WED_EXT_INT_MASK2);

		if (mtk_wed_rro_cfg(dev))
			return;
	}

	mtk_wed_set_512_support(dev, dev->wlan.wcid_512);

	mtk_wed_dma_enable(dev);
	dev->running = true;
}

static int
mtk_wed_attach(struct mtk_wed_device *dev)
	__releases(RCU)
{
	struct mtk_wed_hw *hw;
	struct device *device;
	int ret = 0;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "mtk_wed_attach without holding the RCU read lock");

	if ((dev->wlan.bus_type == MTK_WED_BUS_PCIE &&
	     pci_domain_nr(dev->wlan.pci_dev->bus) > 1) ||
	    !try_module_get(THIS_MODULE))
		ret = -ENODEV;

	rcu_read_unlock();

	if (ret)
		return ret;

	mutex_lock(&hw_lock);

	hw = mtk_wed_assign(dev);
	if (!hw) {
		module_put(THIS_MODULE);
		ret = -ENODEV;
		goto unlock;
	}

	device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
		 ? &dev->wlan.pci_dev->dev
		 : &dev->wlan.platform_dev->dev;
	dev_info(device, "attaching wed device %d version %d\n",
		 hw->index, hw->version);

	dev->hw = hw;
	dev->dev = hw->dev;
	dev->irq = hw->irq;
	dev->wdma_idx = hw->index;
	dev->version = hw->version;

	if (hw->eth->dma_dev == hw->eth->dev &&
	    of_dma_is_coherent(hw->eth->dev->of_node))
		mtk_eth_set_dma_device(hw->eth, hw->dev);

	ret = mtk_wed_tx_buffer_alloc(dev);
	if (ret)
		goto out;

	if (mtk_wed_get_rx_capa(dev)) {
		ret = mtk_wed_rro_alloc(dev);
		if (ret)
			goto out;
	}

	mtk_wed_hw_init_early(dev);
	if (hw->version == 1) {
		regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
				   BIT(hw->index), 0);
	} else {
		dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);
		ret = mtk_wed_wo_init(hw);
	}
out:
	if (ret) {
		dev_err(dev->hw->dev, "failed to attach wed device\n");
		__mtk_wed_detach(dev);
	}
unlock:
	mutex_unlock(&hw_lock);

	return ret;
}

static int
mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
		      bool reset)
{
	struct mtk_wed_ring *ring = &dev->tx_ring[idx];

	/*
	 * Tx ring redirection:
	 * Instead of configuring the WLAN PDMA TX ring directly, the WLAN
	 * driver allocated DMA ring gets configured into WED MTK_WED_RING_TX(n)
	 * registers.
	 *
	 * WED driver posts its own DMA ring as WLAN PDMA TX and configures it
	 * into MTK_WED_WPDMA_RING_TX(n) registers.
	 * It gets filled with packets picked up from WED TX ring and from
	 * WDMA RX.
	 */

	if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring)))
		return -EINVAL;

	if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
					 sizeof(*ring->desc), true))
		return -ENOMEM;

	if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE,
				       reset))
		return -ENOMEM;

	ring->reg_base = MTK_WED_RING_TX(idx);
	ring->wpdma = regs;

	/* WED -> WPDMA */
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);

	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
		ring->desc_phys);
	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
		MTK_WED_TX_RING_SIZE);
	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);

	return 0;
}

static int
mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
{
	struct mtk_wed_ring *ring = &dev->txfree_ring;
	int i, index = dev->hw->version == 1;

	/*
	 * For txfree event handling, the same DMA ring is shared between WED
	 * and WLAN. The WLAN driver accesses the ring index registers through
	 * WED
	 */
	ring->reg_base = MTK_WED_RING_RX(index);
	ring->wpdma = regs;

	for (i = 0; i < 12; i += 4) {
		u32 val = readl(regs + i);

		wed_w32(dev, MTK_WED_RING_RX(index) + i, val);
		wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val);
	}

	return 0;
}

static int
mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
		      bool reset)
{
	struct mtk_wed_ring *ring = &dev->rx_ring[idx];

	if (WARN_ON(idx >= ARRAY_SIZE(dev->rx_ring)))
		return -EINVAL;

	if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE,
					 sizeof(*ring->desc), false))
		return -ENOMEM;

	if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE,
				       reset))
		return -ENOMEM;

	ring->reg_base = MTK_WED_RING_RX_DATA(idx);
	ring->wpdma = regs;
	ring->flags |= MTK_WED_RING_CONFIGURED;

	/* WPDMA -> WED */
	wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
	wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE);

	wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE,
		ring->desc_phys);
	wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT,
		MTK_WED_RX_RING_SIZE);

	return 0;
}

static u32
mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
{
	u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;

	if (dev->hw->version == 1)
		ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
	else
		ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
			    MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
			    MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
			    MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;

	val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
	wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
	val &= ext_mask;
	if (!dev->hw->num_flows)
		val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
	if (val && net_ratelimit())
		pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);

	val = wed_r32(dev, MTK_WED_INT_STATUS);
	val &= mask;
	wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */

	return val;
}

static void
mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
{
	if (!dev->running)
		return;

	mtk_wed_set_ext_int(dev, !!mask);
	wed_w32(dev, MTK_WED_INT_MASK, mask);
}

int mtk_wed_flow_add(int index)
{
	struct mtk_wed_hw *hw = hw_list[index];
	int ret;

	if (!hw || !hw->wed_dev)
		return -ENODEV;

	if (hw->num_flows) {
		hw->num_flows++;
		return 0;
	}

	mutex_lock(&hw_lock);
	if (!hw->wed_dev) {
		ret = -ENODEV;
		goto out;
	}

	ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
	if (!ret)
		hw->num_flows++;
	mtk_wed_set_ext_int(hw->wed_dev, true);

out:
	mutex_unlock(&hw_lock);

	return ret;
}

void mtk_wed_flow_remove(int index)
{
	struct mtk_wed_hw *hw = hw_list[index];

	if (!hw)
		return;

	if (--hw->num_flows)
		return;

	mutex_lock(&hw_lock);
	if (!hw->wed_dev)
		goto out;

	hw->wed_dev->wlan.offload_disable(hw->wed_dev);
	mtk_wed_set_ext_int(hw->wed_dev, true);

out:
	mutex_unlock(&hw_lock);
}

static int
mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct mtk_wed_flow_block_priv *priv = cb_priv;
	struct flow_cls_offload *cls = type_data;
	struct mtk_wed_hw *hw = priv->hw;

	if (!tc_can_offload(priv->dev))
		return -EOPNOTSUPP;

	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	return mtk_flow_offload_cmd(hw->eth, cls, hw->index);
}

static int
mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device *dev,
		       struct flow_block_offload *f)
{
	struct mtk_wed_flow_block_priv *priv;
	static LIST_HEAD(block_cb_list);
	struct flow_block_cb *block_cb;
	struct mtk_eth *eth = hw->eth;
	flow_setup_cb_t *cb;

	if (!eth->soc->offload_version)
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	cb = mtk_wed_setup_tc_block_cb;
	f->driver_block_list = &block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (block_cb) {
			flow_block_cb_incref(block_cb);
			return 0;
		}

		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		priv->hw = hw;
		priv->dev = dev;
		block_cb = flow_block_cb_alloc(cb, dev, priv, NULL);
		if (IS_ERR(block_cb)) {
			kfree(priv);
			return PTR_ERR(block_cb);
		}

		flow_block_cb_incref(block_cb);
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		if (!flow_block_cb_decref(block_cb)) {
			flow_block_cb_remove(block_cb, f);
			list_del(&block_cb->driver_list);
			kfree(block_cb->cb_priv);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int
mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev,
		 enum tc_setup_type type, void *type_data)
{
	struct mtk_wed_hw *hw = wed->hw;

	if (hw->version < 2)
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_BLOCK:
	case TC_SETUP_FT:
		return mtk_wed_setup_tc_block(hw, dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
		    void __iomem *wdma, phys_addr_t wdma_phy,
		    int index)
{
	static const struct mtk_wed_ops wed_ops = {
		.attach = mtk_wed_attach,
		.tx_ring_setup = mtk_wed_tx_ring_setup,
		.rx_ring_setup = mtk_wed_rx_ring_setup,
		.txfree_ring_setup = mtk_wed_txfree_ring_setup,
		.msg_update = mtk_wed_mcu_msg_update,
		.start = mtk_wed_start,
		.stop = mtk_wed_stop,
		.reset_dma = mtk_wed_reset_dma,
		.reg_read = wed_r32,
		.reg_write = wed_w32,
		.irq_get = mtk_wed_irq_get,
		.irq_set_mask = mtk_wed_irq_set_mask,
		.detach = mtk_wed_detach,
		.ppe_check = mtk_wed_ppe_check,
		.setup_tc = mtk_wed_setup_tc,
	};
	struct device_node *eth_np = eth->dev->of_node;
	struct platform_device *pdev;
	struct mtk_wed_hw *hw;
	struct regmap *regs;
	int irq;

	if (!np)
		return;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		goto err_of_node_put;

	get_device(&pdev->dev);
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		goto err_put_device;

	regs = syscon_regmap_lookup_by_phandle(np, NULL);
	if (IS_ERR(regs))
		goto err_put_device;

	rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);

	mutex_lock(&hw_lock);

	if (WARN_ON(hw_list[index]))
		goto unlock;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		goto unlock;

	hw->node = np;
	hw->regs = regs;
	hw->eth = eth;
	hw->dev = &pdev->dev;
	hw->wdma_phy = wdma_phy;
	hw->wdma = wdma;
	hw->index = index;
	hw->irq = irq;
	hw->version = mtk_is_netsys_v1(eth) ? 1 : 2;

	if (hw->version == 1) {
		hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
				"mediatek,pcie-mirror");
		hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
				"mediatek,hifsys");
		if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
			kfree(hw);
			goto unlock;
		}

		if (!index) {
			regmap_write(hw->mirror, 0, 0);
			regmap_write(hw->mirror, 4, 0);
		}
	}

	mtk_wed_hw_add_debugfs(hw);

	hw_list[index] = hw;

	mutex_unlock(&hw_lock);

	return;

unlock:
	mutex_unlock(&hw_lock);
err_put_device:
	put_device(&pdev->dev);
err_of_node_put:
	of_node_put(np);
}

void mtk_wed_exit(void)
{
	int i;

	rcu_assign_pointer(mtk_soc_wed_ops, NULL);

	synchronize_rcu();

	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
		struct mtk_wed_hw *hw;

		hw = hw_list[i];
		if (!hw)
			continue;

		hw_list[i] = NULL;
		debugfs_remove(hw->debugfs_dir);
		put_device(hw->dev);
		of_node_put(hw->node);
		kfree(hw);
	}
}