1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */ 3 4 #include <linux/kernel.h> 5 #include <linux/slab.h> 6 #include <linux/module.h> 7 #include <linux/bitfield.h> 8 #include <linux/dma-mapping.h> 9 #include <linux/skbuff.h> 10 #include <linux/of_platform.h> 11 #include <linux/of_address.h> 12 #include <linux/of_reserved_mem.h> 13 #include <linux/mfd/syscon.h> 14 #include <linux/debugfs.h> 15 #include <linux/soc/mediatek/mtk_wed.h> 16 #include "mtk_eth_soc.h" 17 #include "mtk_wed_regs.h" 18 #include "mtk_wed.h" 19 #include "mtk_ppe.h" 20 #include "mtk_wed_wo.h" 21 22 #define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000) 23 24 #define MTK_WED_PKT_SIZE 1900 25 #define MTK_WED_BUF_SIZE 2048 26 #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048) 27 #define MTK_WED_RX_RING_SIZE 1536 28 29 #define MTK_WED_TX_RING_SIZE 2048 30 #define MTK_WED_WDMA_RING_SIZE 1024 31 #define MTK_WED_MAX_GROUP_SIZE 0x100 32 #define MTK_WED_VLD_GROUP_SIZE 0x40 33 #define MTK_WED_PER_GROUP_PKT 128 34 35 #define MTK_WED_FBUF_SIZE 128 36 #define MTK_WED_MIOD_CNT 16 37 #define MTK_WED_FB_CMD_CNT 1024 38 #define MTK_WED_RRO_QUE_CNT 8192 39 #define MTK_WED_MIOD_ENTRY_CNT 128 40 41 static struct mtk_wed_hw *hw_list[2]; 42 static DEFINE_MUTEX(hw_lock); 43 44 static void 45 wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) 46 { 47 regmap_update_bits(dev->hw->regs, reg, mask | val, val); 48 } 49 50 static void 51 wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask) 52 { 53 return wed_m32(dev, reg, 0, mask); 54 } 55 56 static void 57 wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) 58 { 59 return wed_m32(dev, reg, mask, 0); 60 } 61 62 static void 63 wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) 64 { 65 wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val); 66 } 67 68 static void 69 wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask) 70 { 71 wdma_m32(dev, reg, 0, mask); 72 } 73 74 static void 75 wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) 76 { 77 wdma_m32(dev, reg, mask, 0); 78 } 79 80 static u32 81 wifi_r32(struct mtk_wed_device *dev, u32 reg) 82 { 83 return readl(dev->wlan.base + reg); 84 } 85 86 static void 87 wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val) 88 { 89 writel(val, dev->wlan.base + reg); 90 } 91 92 static u32 93 mtk_wed_read_reset(struct mtk_wed_device *dev) 94 { 95 return wed_r32(dev, MTK_WED_RESET); 96 } 97 98 static u32 99 mtk_wdma_read_reset(struct mtk_wed_device *dev) 100 { 101 return wdma_r32(dev, MTK_WDMA_GLO_CFG); 102 } 103 104 static void 105 mtk_wdma_rx_reset(struct mtk_wed_device *dev) 106 { 107 u32 status, mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY; 108 int i; 109 110 wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN); 111 if (readx_poll_timeout(mtk_wdma_read_reset, dev, status, 112 !(status & mask), 0, 1000)) 113 dev_err(dev->hw->dev, "rx reset failed\n"); 114 115 for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) { 116 if (dev->rx_wdma[i].desc) 117 continue; 118 119 wdma_w32(dev, 120 MTK_WDMA_RING_RX(i) + MTK_WED_RING_OFS_CPU_IDX, 0); 121 } 122 } 123 124 static void 125 mtk_wdma_tx_reset(struct mtk_wed_device *dev) 126 { 127 u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY; 128 int i; 129 130 wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN); 131 if (readx_poll_timeout(mtk_wdma_read_reset, dev, status, 132 !(status & mask), 0, 1000)) 133 dev_err(dev->hw->dev, "tx reset failed\n"); 134 135 for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) { 136 if (dev->tx_wdma[i].desc) 137 continue; 138 139 wdma_w32(dev, 140 MTK_WDMA_RING_TX(i) + MTK_WED_RING_OFS_CPU_IDX, 0); 141 } 142 } 143 144 static void 145 mtk_wed_reset(struct mtk_wed_device *dev, u32 mask) 146 { 147 u32 status; 148 149 wed_w32(dev, MTK_WED_RESET, mask); 150 if (readx_poll_timeout(mtk_wed_read_reset, dev, status, 151 !(status & mask), 0, 1000)) 152 WARN_ON_ONCE(1); 153 } 154 155 static u32 156 mtk_wed_wo_read_status(struct mtk_wed_device *dev) 157 { 158 return wed_r32(dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_WO_STATUS); 159 } 160 161 static void 162 mtk_wed_wo_reset(struct mtk_wed_device *dev) 163 { 164 struct mtk_wed_wo *wo = dev->hw->wed_wo; 165 u8 state = MTK_WED_WO_STATE_DISABLE; 166 void __iomem *reg; 167 u32 val; 168 169 mtk_wdma_tx_reset(dev); 170 mtk_wed_reset(dev, MTK_WED_RESET_WED); 171 172 mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, 173 MTK_WED_WO_CMD_CHANGE_STATE, &state, 174 sizeof(state), false); 175 176 if (readx_poll_timeout(mtk_wed_wo_read_status, dev, val, 177 val == MTK_WED_WOIF_DISABLE_DONE, 178 100, MTK_WOCPU_TIMEOUT)) 179 dev_err(dev->hw->dev, "failed to disable wed-wo\n"); 180 181 reg = ioremap(MTK_WED_WO_CPU_MCUSYS_RESET_ADDR, 4); 182 183 val = readl(reg); 184 switch (dev->hw->index) { 185 case 0: 186 val |= MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK; 187 writel(val, reg); 188 val &= ~MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK; 189 writel(val, reg); 190 break; 191 case 1: 192 val |= MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK; 193 writel(val, reg); 194 val &= ~MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK; 195 writel(val, reg); 196 break; 197 default: 198 break; 199 } 200 iounmap(reg); 201 } 202 203 static struct mtk_wed_hw * 204 mtk_wed_assign(struct mtk_wed_device *dev) 205 { 206 struct mtk_wed_hw *hw; 207 int i; 208 209 if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) { 210 hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)]; 211 if (!hw) 212 return NULL; 213 214 if (!hw->wed_dev) 215 goto out; 216 217 if (hw->version == 1) 218 return NULL; 219 220 /* MT7986 WED devices do not have any pcie slot restrictions */ 221 } 222 /* MT7986 PCIE or AXI */ 223 for (i = 0; i < ARRAY_SIZE(hw_list); i++) { 224 hw = hw_list[i]; 225 if (hw && !hw->wed_dev) 226 goto out; 227 } 228 229 return NULL; 230 231 out: 232 hw->wed_dev = dev; 233 return hw; 234 } 235 236 static int 237 mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev) 238 { 239 struct mtk_wdma_desc *desc; 240 dma_addr_t desc_phys; 241 void **page_list; 242 int token = dev->wlan.token_start; 243 int ring_size; 244 int n_pages; 245 int i, page_idx; 246 247 ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); 248 n_pages = ring_size / MTK_WED_BUF_PER_PAGE; 249 250 page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL); 251 if (!page_list) 252 return -ENOMEM; 253 254 dev->tx_buf_ring.size = ring_size; 255 dev->tx_buf_ring.pages = page_list; 256 257 desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc), 258 &desc_phys, GFP_KERNEL); 259 if (!desc) 260 return -ENOMEM; 261 262 dev->tx_buf_ring.desc = desc; 263 dev->tx_buf_ring.desc_phys = desc_phys; 264 265 for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) { 266 dma_addr_t page_phys, buf_phys; 267 struct page *page; 268 void *buf; 269 int s; 270 271 page = __dev_alloc_pages(GFP_KERNEL, 0); 272 if (!page) 273 return -ENOMEM; 274 275 page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE, 276 DMA_BIDIRECTIONAL); 277 if (dma_mapping_error(dev->hw->dev, page_phys)) { 278 __free_page(page); 279 return -ENOMEM; 280 } 281 282 page_list[page_idx++] = page; 283 dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE, 284 DMA_BIDIRECTIONAL); 285 286 buf = page_to_virt(page); 287 buf_phys = page_phys; 288 289 for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) { 290 u32 txd_size; 291 u32 ctrl; 292 293 txd_size = dev->wlan.init_buf(buf, buf_phys, token++); 294 295 desc->buf0 = cpu_to_le32(buf_phys); 296 desc->buf1 = cpu_to_le32(buf_phys + txd_size); 297 298 if (dev->hw->version == 1) 299 ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) | 300 FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, 301 MTK_WED_BUF_SIZE - txd_size) | 302 MTK_WDMA_DESC_CTRL_LAST_SEG1; 303 else 304 ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) | 305 FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2, 306 MTK_WED_BUF_SIZE - txd_size) | 307 MTK_WDMA_DESC_CTRL_LAST_SEG0; 308 desc->ctrl = cpu_to_le32(ctrl); 309 desc->info = 0; 310 desc++; 311 312 buf += MTK_WED_BUF_SIZE; 313 buf_phys += MTK_WED_BUF_SIZE; 314 } 315 316 dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE, 317 DMA_BIDIRECTIONAL); 318 } 319 320 return 0; 321 } 322 323 static void 324 mtk_wed_free_tx_buffer(struct mtk_wed_device *dev) 325 { 326 struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc; 327 void **page_list = dev->tx_buf_ring.pages; 328 int page_idx; 329 int i; 330 331 if (!page_list) 332 return; 333 334 if (!desc) 335 goto free_pagelist; 336 337 for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size; 338 i += MTK_WED_BUF_PER_PAGE) { 339 void *page = page_list[page_idx++]; 340 dma_addr_t buf_addr; 341 342 if (!page) 343 break; 344 345 buf_addr = le32_to_cpu(desc[i].buf0); 346 dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE, 347 DMA_BIDIRECTIONAL); 348 __free_page(page); 349 } 350 351 dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc), 352 desc, dev->tx_buf_ring.desc_phys); 353 354 free_pagelist: 355 kfree(page_list); 356 } 357 358 static int 359 mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev) 360 { 361 struct mtk_rxbm_desc *desc; 362 dma_addr_t desc_phys; 363 364 dev->rx_buf_ring.size = dev->wlan.rx_nbuf; 365 desc = dma_alloc_coherent(dev->hw->dev, 366 dev->wlan.rx_nbuf * sizeof(*desc), 367 &desc_phys, GFP_KERNEL); 368 if (!desc) 369 return -ENOMEM; 370 371 dev->rx_buf_ring.desc = desc; 372 dev->rx_buf_ring.desc_phys = desc_phys; 373 dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt); 374 375 return 0; 376 } 377 378 static void 379 mtk_wed_free_rx_buffer(struct mtk_wed_device *dev) 380 { 381 struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc; 382 383 if (!desc) 384 return; 385 386 dev->wlan.release_rx_buf(dev); 387 dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc), 388 desc, dev->rx_buf_ring.desc_phys); 389 } 390 391 static void 392 mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev) 393 { 394 wed_w32(dev, MTK_WED_RX_BM_RX_DMAD, 395 FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_size)); 396 wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys); 397 wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL | 398 FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_npkt)); 399 wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH, 400 FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff)); 401 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN); 402 } 403 404 static void 405 mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring) 406 { 407 if (!ring->desc) 408 return; 409 410 dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size, 411 ring->desc, ring->desc_phys); 412 } 413 414 static void 415 mtk_wed_free_rx_rings(struct mtk_wed_device *dev) 416 { 417 mtk_wed_free_rx_buffer(dev); 418 mtk_wed_free_ring(dev, &dev->rro.ring); 419 } 420 421 static void 422 mtk_wed_free_tx_rings(struct mtk_wed_device *dev) 423 { 424 int i; 425 426 for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) 427 mtk_wed_free_ring(dev, &dev->tx_ring[i]); 428 for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) 429 mtk_wed_free_ring(dev, &dev->rx_wdma[i]); 430 } 431 432 static void 433 mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en) 434 { 435 u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; 436 437 if (dev->hw->version == 1) 438 mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR; 439 else 440 mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | 441 MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | 442 MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | 443 MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR; 444 445 if (!dev->hw->num_flows) 446 mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; 447 448 wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0); 449 wed_r32(dev, MTK_WED_EXT_INT_MASK); 450 } 451 452 static void 453 mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable) 454 { 455 if (enable) { 456 wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR); 457 wed_w32(dev, MTK_WED_TXP_DW1, 458 FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103)); 459 } else { 460 wed_w32(dev, MTK_WED_TXP_DW1, 461 FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100)); 462 wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR); 463 } 464 } 465 466 #define MTK_WFMDA_RX_DMA_EN BIT(2) 467 static void 468 mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx) 469 { 470 u32 val; 471 int i; 472 473 if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED)) 474 return; /* queue is not configured by mt76 */ 475 476 for (i = 0; i < 3; i++) { 477 u32 cur_idx; 478 479 cur_idx = wed_r32(dev, 480 MTK_WED_WPDMA_RING_RX_DATA(idx) + 481 MTK_WED_RING_OFS_CPU_IDX); 482 if (cur_idx == MTK_WED_RX_RING_SIZE - 1) 483 break; 484 485 usleep_range(100000, 200000); 486 } 487 488 if (i == 3) { 489 dev_err(dev->hw->dev, "rx dma enable failed\n"); 490 return; 491 } 492 493 val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) | 494 MTK_WFMDA_RX_DMA_EN; 495 wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val); 496 } 497 498 static void 499 mtk_wed_dma_disable(struct mtk_wed_device *dev) 500 { 501 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, 502 MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | 503 MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); 504 505 wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); 506 507 wed_clr(dev, MTK_WED_GLO_CFG, 508 MTK_WED_GLO_CFG_TX_DMA_EN | 509 MTK_WED_GLO_CFG_RX_DMA_EN); 510 511 wdma_clr(dev, MTK_WDMA_GLO_CFG, 512 MTK_WDMA_GLO_CFG_TX_DMA_EN | 513 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | 514 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); 515 516 if (dev->hw->version == 1) { 517 regmap_write(dev->hw->mirror, dev->hw->index * 4, 0); 518 wdma_clr(dev, MTK_WDMA_GLO_CFG, 519 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); 520 } else { 521 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, 522 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | 523 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); 524 525 wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, 526 MTK_WED_WPDMA_RX_D_RX_DRV_EN); 527 wed_clr(dev, MTK_WED_WDMA_GLO_CFG, 528 MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); 529 530 mtk_wed_set_512_support(dev, false); 531 } 532 } 533 534 static void 535 mtk_wed_stop(struct mtk_wed_device *dev) 536 { 537 mtk_wed_dma_disable(dev); 538 mtk_wed_set_ext_int(dev, false); 539 540 wed_clr(dev, MTK_WED_CTRL, 541 MTK_WED_CTRL_WDMA_INT_AGENT_EN | 542 MTK_WED_CTRL_WPDMA_INT_AGENT_EN | 543 MTK_WED_CTRL_WED_TX_BM_EN | 544 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); 545 wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0); 546 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0); 547 wdma_w32(dev, MTK_WDMA_INT_MASK, 0); 548 wdma_w32(dev, MTK_WDMA_INT_GRP2, 0); 549 wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0); 550 551 if (dev->hw->version == 1) 552 return; 553 554 wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0); 555 wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0); 556 wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN); 557 } 558 559 static void 560 mtk_wed_detach(struct mtk_wed_device *dev) 561 { 562 struct mtk_wed_hw *hw = dev->hw; 563 564 mutex_lock(&hw_lock); 565 566 mtk_wed_stop(dev); 567 568 wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); 569 wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); 570 571 mtk_wed_reset(dev, MTK_WED_RESET_WED); 572 if (mtk_wed_get_rx_capa(dev)) { 573 wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN); 574 wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX); 575 wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); 576 } 577 578 mtk_wed_free_tx_buffer(dev); 579 mtk_wed_free_tx_rings(dev); 580 581 if (mtk_wed_get_rx_capa(dev)) { 582 mtk_wed_wo_reset(dev); 583 mtk_wed_free_rx_rings(dev); 584 mtk_wed_wo_deinit(hw); 585 mtk_wdma_rx_reset(dev); 586 } 587 588 if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) { 589 struct device_node *wlan_node; 590 591 wlan_node = dev->wlan.pci_dev->dev.of_node; 592 if (of_dma_is_coherent(wlan_node) && hw->hifsys) 593 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, 594 BIT(hw->index), BIT(hw->index)); 595 } 596 597 if (!hw_list[!hw->index]->wed_dev && 598 hw->eth->dma_dev != hw->eth->dev) 599 mtk_eth_set_dma_device(hw->eth, hw->eth->dev); 600 601 memset(dev, 0, sizeof(*dev)); 602 module_put(THIS_MODULE); 603 604 hw->wed_dev = NULL; 605 mutex_unlock(&hw_lock); 606 } 607 608 #define PCIE_BASE_ADDR0 0x11280000 609 static void 610 mtk_wed_bus_init(struct mtk_wed_device *dev) 611 { 612 switch (dev->wlan.bus_type) { 613 case MTK_WED_BUS_PCIE: { 614 struct device_node *np = dev->hw->eth->dev->of_node; 615 struct regmap *regs; 616 617 regs = syscon_regmap_lookup_by_phandle(np, 618 "mediatek,wed-pcie"); 619 if (IS_ERR(regs)) 620 break; 621 622 regmap_update_bits(regs, 0, BIT(0), BIT(0)); 623 624 wed_w32(dev, MTK_WED_PCIE_INT_CTRL, 625 FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2)); 626 627 /* pcie interrupt control: pola/source selection */ 628 wed_set(dev, MTK_WED_PCIE_INT_CTRL, 629 MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA | 630 FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1)); 631 wed_r32(dev, MTK_WED_PCIE_INT_CTRL); 632 633 wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180); 634 wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184); 635 636 /* pcie interrupt status trigger register */ 637 wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24)); 638 wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER); 639 640 /* pola setting */ 641 wed_set(dev, MTK_WED_PCIE_INT_CTRL, 642 MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA); 643 break; 644 } 645 case MTK_WED_BUS_AXI: 646 wed_set(dev, MTK_WED_WPDMA_INT_CTRL, 647 MTK_WED_WPDMA_INT_CTRL_SIG_SRC | 648 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0)); 649 break; 650 default: 651 break; 652 } 653 } 654 655 static void 656 mtk_wed_set_wpdma(struct mtk_wed_device *dev) 657 { 658 if (dev->hw->version == 1) { 659 wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys); 660 } else { 661 mtk_wed_bus_init(dev); 662 663 wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int); 664 wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask); 665 wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx); 666 wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree); 667 wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo); 668 wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx); 669 } 670 } 671 672 static void 673 mtk_wed_hw_init_early(struct mtk_wed_device *dev) 674 { 675 u32 mask, set; 676 677 mtk_wed_stop(dev); 678 mtk_wed_reset(dev, MTK_WED_RESET_WED); 679 mtk_wed_set_wpdma(dev); 680 681 mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE | 682 MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE | 683 MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE; 684 set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) | 685 MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP | 686 MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY; 687 wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set); 688 689 if (dev->hw->version == 1) { 690 u32 offset = dev->hw->index ? 0x04000400 : 0; 691 692 wdma_set(dev, MTK_WDMA_GLO_CFG, 693 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | 694 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES | 695 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); 696 697 wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset); 698 wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset); 699 wed_w32(dev, MTK_WED_PCIE_CFG_BASE, 700 MTK_PCIE_BASE(dev->hw->index)); 701 } else { 702 wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy); 703 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT); 704 wed_w32(dev, MTK_WED_WDMA_OFFSET0, 705 FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS, 706 MTK_WDMA_INT_STATUS) | 707 FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG, 708 MTK_WDMA_GLO_CFG)); 709 710 wed_w32(dev, MTK_WED_WDMA_OFFSET1, 711 FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL, 712 MTK_WDMA_RING_TX(0)) | 713 FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL, 714 MTK_WDMA_RING_RX(0))); 715 } 716 } 717 718 static int 719 mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, 720 int size) 721 { 722 ring->desc = dma_alloc_coherent(dev->hw->dev, 723 size * sizeof(*ring->desc), 724 &ring->desc_phys, GFP_KERNEL); 725 if (!ring->desc) 726 return -ENOMEM; 727 728 ring->desc_size = sizeof(*ring->desc); 729 ring->size = size; 730 memset(ring->desc, 0, size); 731 732 return 0; 733 } 734 735 #define MTK_WED_MIOD_COUNT (MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT) 736 static int 737 mtk_wed_rro_alloc(struct mtk_wed_device *dev) 738 { 739 struct reserved_mem *rmem; 740 struct device_node *np; 741 int index; 742 743 index = of_property_match_string(dev->hw->node, "memory-region-names", 744 "wo-dlm"); 745 if (index < 0) 746 return index; 747 748 np = of_parse_phandle(dev->hw->node, "memory-region", index); 749 if (!np) 750 return -ENODEV; 751 752 rmem = of_reserved_mem_lookup(np); 753 of_node_put(np); 754 755 if (!rmem) 756 return -ENODEV; 757 758 dev->rro.miod_phys = rmem->base; 759 dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys; 760 761 return mtk_wed_rro_ring_alloc(dev, &dev->rro.ring, 762 MTK_WED_RRO_QUE_CNT); 763 } 764 765 static int 766 mtk_wed_rro_cfg(struct mtk_wed_device *dev) 767 { 768 struct mtk_wed_wo *wo = dev->hw->wed_wo; 769 struct { 770 struct { 771 __le32 base; 772 __le32 cnt; 773 __le32 unit; 774 } ring[2]; 775 __le32 wed; 776 u8 version; 777 } req = { 778 .ring[0] = { 779 .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE), 780 .cnt = cpu_to_le32(MTK_WED_MIOD_CNT), 781 .unit = cpu_to_le32(MTK_WED_MIOD_ENTRY_CNT), 782 }, 783 .ring[1] = { 784 .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE + 785 MTK_WED_MIOD_COUNT), 786 .cnt = cpu_to_le32(MTK_WED_FB_CMD_CNT), 787 .unit = cpu_to_le32(4), 788 }, 789 }; 790 791 return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, 792 MTK_WED_WO_CMD_WED_CFG, 793 &req, sizeof(req), true); 794 } 795 796 static void 797 mtk_wed_rro_hw_init(struct mtk_wed_device *dev) 798 { 799 wed_w32(dev, MTK_WED_RROQM_MIOD_CFG, 800 FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) | 801 FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) | 802 FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW, 803 MTK_WED_MIOD_ENTRY_CNT >> 2)); 804 805 wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_phys); 806 wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1, 807 FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT)); 808 wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_phys); 809 wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1, 810 FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT)); 811 wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0); 812 wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.ring.desc_phys); 813 814 wed_set(dev, MTK_WED_RROQM_RST_IDX, 815 MTK_WED_RROQM_RST_IDX_MIOD | 816 MTK_WED_RROQM_RST_IDX_FDBK); 817 818 wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0); 819 wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT - 1); 820 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN); 821 } 822 823 static void 824 mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev) 825 { 826 wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM); 827 828 for (;;) { 829 usleep_range(100, 200); 830 if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM)) 831 break; 832 } 833 834 /* configure RX_ROUTE_QM */ 835 wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); 836 wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT); 837 wed_set(dev, MTK_WED_RTQM_GLO_CFG, 838 FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index)); 839 wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST); 840 /* enable RX_ROUTE_QM */ 841 wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN); 842 } 843 844 static void 845 mtk_wed_hw_init(struct mtk_wed_device *dev) 846 { 847 if (dev->init_done) 848 return; 849 850 dev->init_done = true; 851 mtk_wed_set_ext_int(dev, false); 852 wed_w32(dev, MTK_WED_TX_BM_CTRL, 853 MTK_WED_TX_BM_CTRL_PAUSE | 854 FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, 855 dev->tx_buf_ring.size / 128) | 856 FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, 857 MTK_WED_TX_RING_SIZE / 256)); 858 859 wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys); 860 861 wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE); 862 863 if (dev->hw->version == 1) { 864 wed_w32(dev, MTK_WED_TX_BM_TKID, 865 FIELD_PREP(MTK_WED_TX_BM_TKID_START, 866 dev->wlan.token_start) | 867 FIELD_PREP(MTK_WED_TX_BM_TKID_END, 868 dev->wlan.token_start + 869 dev->wlan.nbuf - 1)); 870 wed_w32(dev, MTK_WED_TX_BM_DYN_THR, 871 FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) | 872 MTK_WED_TX_BM_DYN_THR_HI); 873 } else { 874 wed_w32(dev, MTK_WED_TX_BM_TKID_V2, 875 FIELD_PREP(MTK_WED_TX_BM_TKID_START, 876 dev->wlan.token_start) | 877 FIELD_PREP(MTK_WED_TX_BM_TKID_END, 878 dev->wlan.token_start + 879 dev->wlan.nbuf - 1)); 880 wed_w32(dev, MTK_WED_TX_BM_DYN_THR, 881 FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) | 882 MTK_WED_TX_BM_DYN_THR_HI_V2); 883 wed_w32(dev, MTK_WED_TX_TKID_CTRL, 884 MTK_WED_TX_TKID_CTRL_PAUSE | 885 FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM, 886 dev->tx_buf_ring.size / 128) | 887 FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM, 888 dev->tx_buf_ring.size / 128)); 889 wed_w32(dev, MTK_WED_TX_TKID_DYN_THR, 890 FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) | 891 MTK_WED_TX_TKID_DYN_THR_HI); 892 } 893 894 mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); 895 896 if (dev->hw->version == 1) { 897 wed_set(dev, MTK_WED_CTRL, 898 MTK_WED_CTRL_WED_TX_BM_EN | 899 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); 900 } else { 901 wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE); 902 /* rx hw init */ 903 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 904 MTK_WED_WPDMA_RX_D_RST_CRX_IDX | 905 MTK_WED_WPDMA_RX_D_RST_DRV_IDX); 906 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0); 907 908 mtk_wed_rx_buffer_hw_init(dev); 909 mtk_wed_rro_hw_init(dev); 910 mtk_wed_route_qm_hw_init(dev); 911 } 912 913 wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE); 914 } 915 916 static void 917 mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx) 918 { 919 void *head = (void *)ring->desc; 920 int i; 921 922 for (i = 0; i < size; i++) { 923 struct mtk_wdma_desc *desc; 924 925 desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size); 926 desc->buf0 = 0; 927 if (tx) 928 desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); 929 else 930 desc->ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST); 931 desc->buf1 = 0; 932 desc->info = 0; 933 } 934 } 935 936 static u32 937 mtk_wed_check_busy(struct mtk_wed_device *dev) 938 { 939 if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY) 940 return true; 941 942 if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) & 943 MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY) 944 return true; 945 946 if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY) 947 return true; 948 949 if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) & 950 MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY) 951 return true; 952 953 if (wdma_r32(dev, MTK_WDMA_GLO_CFG) & 954 MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY) 955 return true; 956 957 if (wed_r32(dev, MTK_WED_CTRL) & 958 (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY)) 959 return true; 960 961 return false; 962 } 963 964 static int 965 mtk_wed_poll_busy(struct mtk_wed_device *dev) 966 { 967 int sleep = 15000; 968 int timeout = 100 * sleep; 969 u32 val; 970 971 return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, 972 timeout, false, dev); 973 } 974 975 static void 976 mtk_wed_reset_dma(struct mtk_wed_device *dev) 977 { 978 bool busy = false; 979 u32 val; 980 int i; 981 982 for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) { 983 if (!dev->tx_ring[i].desc) 984 continue; 985 986 mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE, 987 true); 988 } 989 990 if (mtk_wed_poll_busy(dev)) 991 busy = mtk_wed_check_busy(dev); 992 993 if (busy) { 994 mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA); 995 } else { 996 wed_w32(dev, MTK_WED_RESET_IDX, 997 MTK_WED_RESET_IDX_TX | 998 MTK_WED_RESET_IDX_RX); 999 wed_w32(dev, MTK_WED_RESET_IDX, 0); 1000 } 1001 1002 wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); 1003 wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); 1004 1005 if (mtk_wed_get_rx_capa(dev)) 1006 mtk_wdma_rx_reset(dev); 1007 1008 if (busy) { 1009 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT); 1010 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV); 1011 } else { 1012 wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 1013 MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV); 1014 wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0); 1015 1016 wed_set(dev, MTK_WED_WDMA_GLO_CFG, 1017 MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); 1018 1019 wed_clr(dev, MTK_WED_WDMA_GLO_CFG, 1020 MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); 1021 } 1022 1023 for (i = 0; i < 100; i++) { 1024 val = wed_r32(dev, MTK_WED_TX_BM_INTF); 1025 if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40) 1026 break; 1027 } 1028 1029 mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT); 1030 mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); 1031 1032 if (busy) { 1033 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); 1034 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV); 1035 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV); 1036 } else { 1037 wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 1038 MTK_WED_WPDMA_RESET_IDX_TX | 1039 MTK_WED_WPDMA_RESET_IDX_RX); 1040 wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0); 1041 } 1042 } 1043 1044 static int 1045 mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, 1046 int size, u32 desc_size, bool tx) 1047 { 1048 ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size, 1049 &ring->desc_phys, GFP_KERNEL); 1050 if (!ring->desc) 1051 return -ENOMEM; 1052 1053 ring->desc_size = desc_size; 1054 ring->size = size; 1055 mtk_wed_ring_reset(ring, size, tx); 1056 1057 return 0; 1058 } 1059 1060 static int 1061 mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size) 1062 { 1063 u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version; 1064 struct mtk_wed_ring *wdma; 1065 1066 if (idx >= ARRAY_SIZE(dev->rx_wdma)) 1067 return -EINVAL; 1068 1069 wdma = &dev->rx_wdma[idx]; 1070 if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size, 1071 true)) 1072 return -ENOMEM; 1073 1074 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, 1075 wdma->desc_phys); 1076 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, 1077 size); 1078 wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); 1079 1080 wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, 1081 wdma->desc_phys); 1082 wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, 1083 size); 1084 1085 return 0; 1086 } 1087 1088 static int 1089 mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size) 1090 { 1091 u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version; 1092 struct mtk_wed_ring *wdma; 1093 1094 if (idx >= ARRAY_SIZE(dev->tx_wdma)) 1095 return -EINVAL; 1096 1097 wdma = &dev->tx_wdma[idx]; 1098 if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size, 1099 true)) 1100 return -ENOMEM; 1101 1102 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, 1103 wdma->desc_phys); 1104 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, 1105 size); 1106 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); 1107 wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0); 1108 1109 if (!idx) { 1110 wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE, 1111 wdma->desc_phys); 1112 wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_COUNT, 1113 size); 1114 wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_CPU_IDX, 1115 0); 1116 wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_DMA_IDX, 1117 0); 1118 } 1119 1120 return 0; 1121 } 1122 1123 static void 1124 mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb, 1125 u32 reason, u32 hash) 1126 { 1127 struct mtk_eth *eth = dev->hw->eth; 1128 struct ethhdr *eh; 1129 1130 if (!skb) 1131 return; 1132 1133 if (reason != MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) 1134 return; 1135 1136 skb_set_mac_header(skb, 0); 1137 eh = eth_hdr(skb); 1138 skb->protocol = eh->h_proto; 1139 mtk_ppe_check_skb(eth->ppe[dev->hw->index], skb, hash); 1140 } 1141 1142 static void 1143 mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) 1144 { 1145 u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0)); 1146 1147 /* wed control cr set */ 1148 wed_set(dev, MTK_WED_CTRL, 1149 MTK_WED_CTRL_WDMA_INT_AGENT_EN | 1150 MTK_WED_CTRL_WPDMA_INT_AGENT_EN | 1151 MTK_WED_CTRL_WED_TX_BM_EN | 1152 MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); 1153 1154 if (dev->hw->version == 1) { 1155 wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, 1156 MTK_WED_PCIE_INT_TRIGGER_STATUS); 1157 1158 wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 1159 MTK_WED_WPDMA_INT_TRIGGER_RX_DONE | 1160 MTK_WED_WPDMA_INT_TRIGGER_TX_DONE); 1161 1162 wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask); 1163 } else { 1164 wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE, 1165 GENMASK(1, 0)); 1166 /* initail tx interrupt trigger */ 1167 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX, 1168 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN | 1169 MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR | 1170 MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN | 1171 MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR | 1172 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG, 1173 dev->wlan.tx_tbit[0]) | 1174 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG, 1175 dev->wlan.tx_tbit[1])); 1176 1177 /* initail txfree interrupt trigger */ 1178 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE, 1179 MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN | 1180 MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR | 1181 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG, 1182 dev->wlan.txfree_tbit)); 1183 1184 wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX, 1185 MTK_WED_WPDMA_INT_CTRL_RX0_EN | 1186 MTK_WED_WPDMA_INT_CTRL_RX0_CLR | 1187 MTK_WED_WPDMA_INT_CTRL_RX1_EN | 1188 MTK_WED_WPDMA_INT_CTRL_RX1_CLR | 1189 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG, 1190 dev->wlan.rx_tbit[0]) | 1191 FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG, 1192 dev->wlan.rx_tbit[1])); 1193 1194 wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask); 1195 wed_set(dev, MTK_WED_WDMA_INT_CTRL, 1196 FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL, 1197 dev->wdma_idx)); 1198 } 1199 1200 wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask); 1201 1202 wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask); 1203 wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask); 1204 wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask); 1205 wed_w32(dev, MTK_WED_INT_MASK, irq_mask); 1206 } 1207 1208 static void 1209 mtk_wed_dma_enable(struct mtk_wed_device *dev) 1210 { 1211 wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV); 1212 1213 wed_set(dev, MTK_WED_GLO_CFG, 1214 MTK_WED_GLO_CFG_TX_DMA_EN | 1215 MTK_WED_GLO_CFG_RX_DMA_EN); 1216 wed_set(dev, MTK_WED_WPDMA_GLO_CFG, 1217 MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | 1218 MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); 1219 wed_set(dev, MTK_WED_WDMA_GLO_CFG, 1220 MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); 1221 1222 wdma_set(dev, MTK_WDMA_GLO_CFG, 1223 MTK_WDMA_GLO_CFG_TX_DMA_EN | 1224 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | 1225 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); 1226 1227 if (dev->hw->version == 1) { 1228 wdma_set(dev, MTK_WDMA_GLO_CFG, 1229 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); 1230 } else { 1231 int i; 1232 1233 wed_set(dev, MTK_WED_WPDMA_CTRL, 1234 MTK_WED_WPDMA_CTRL_SDL1_FIXED); 1235 1236 wed_set(dev, MTK_WED_WDMA_GLO_CFG, 1237 MTK_WED_WDMA_GLO_CFG_TX_DRV_EN | 1238 MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); 1239 1240 wed_set(dev, MTK_WED_WPDMA_GLO_CFG, 1241 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | 1242 MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); 1243 1244 wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, 1245 MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP | 1246 MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV); 1247 1248 wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, 1249 MTK_WED_WPDMA_RX_D_RX_DRV_EN | 1250 FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) | 1251 FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL, 1252 0x2)); 1253 1254 for (i = 0; i < MTK_WED_RX_QUEUES; i++) 1255 mtk_wed_check_wfdma_rx_fill(dev, i); 1256 } 1257 } 1258 1259 static void 1260 mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) 1261 { 1262 int i; 1263 1264 for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) 1265 if (!dev->rx_wdma[i].desc) 1266 mtk_wed_wdma_rx_ring_setup(dev, i, 16); 1267 1268 mtk_wed_hw_init(dev); 1269 mtk_wed_configure_irq(dev, irq_mask); 1270 1271 mtk_wed_set_ext_int(dev, true); 1272 1273 if (dev->hw->version == 1) { 1274 u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN | 1275 FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, 1276 dev->hw->index); 1277 1278 val |= BIT(0) | (BIT(1) * !!dev->hw->index); 1279 regmap_write(dev->hw->mirror, dev->hw->index * 4, val); 1280 } else { 1281 /* driver set mid ready and only once */ 1282 wed_w32(dev, MTK_WED_EXT_INT_MASK1, 1283 MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); 1284 wed_w32(dev, MTK_WED_EXT_INT_MASK2, 1285 MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); 1286 1287 wed_r32(dev, MTK_WED_EXT_INT_MASK1); 1288 wed_r32(dev, MTK_WED_EXT_INT_MASK2); 1289 1290 if (mtk_wed_rro_cfg(dev)) 1291 return; 1292 1293 mtk_wed_set_512_support(dev, dev->wlan.wcid_512); 1294 } 1295 1296 mtk_wed_dma_enable(dev); 1297 dev->running = true; 1298 } 1299 1300 static int 1301 mtk_wed_attach(struct mtk_wed_device *dev) 1302 __releases(RCU) 1303 { 1304 struct mtk_wed_hw *hw; 1305 struct device *device; 1306 int ret = 0; 1307 1308 RCU_LOCKDEP_WARN(!rcu_read_lock_held(), 1309 "mtk_wed_attach without holding the RCU read lock"); 1310 1311 if ((dev->wlan.bus_type == MTK_WED_BUS_PCIE && 1312 pci_domain_nr(dev->wlan.pci_dev->bus) > 1) || 1313 !try_module_get(THIS_MODULE)) 1314 ret = -ENODEV; 1315 1316 rcu_read_unlock(); 1317 1318 if (ret) 1319 return ret; 1320 1321 mutex_lock(&hw_lock); 1322 1323 hw = mtk_wed_assign(dev); 1324 if (!hw) { 1325 module_put(THIS_MODULE); 1326 ret = -ENODEV; 1327 goto unlock; 1328 } 1329 1330 device = dev->wlan.bus_type == MTK_WED_BUS_PCIE 1331 ? &dev->wlan.pci_dev->dev 1332 : &dev->wlan.platform_dev->dev; 1333 dev_info(device, "attaching wed device %d version %d\n", 1334 hw->index, hw->version); 1335 1336 dev->hw = hw; 1337 dev->dev = hw->dev; 1338 dev->irq = hw->irq; 1339 dev->wdma_idx = hw->index; 1340 dev->version = hw->version; 1341 1342 if (hw->eth->dma_dev == hw->eth->dev && 1343 of_dma_is_coherent(hw->eth->dev->of_node)) 1344 mtk_eth_set_dma_device(hw->eth, hw->dev); 1345 1346 ret = mtk_wed_tx_buffer_alloc(dev); 1347 if (ret) 1348 goto out; 1349 1350 if (mtk_wed_get_rx_capa(dev)) { 1351 ret = mtk_wed_rx_buffer_alloc(dev); 1352 if (ret) 1353 goto out; 1354 1355 ret = mtk_wed_rro_alloc(dev); 1356 if (ret) 1357 goto out; 1358 } 1359 1360 mtk_wed_hw_init_early(dev); 1361 if (hw->version == 1) 1362 regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, 1363 BIT(hw->index), 0); 1364 else 1365 ret = mtk_wed_wo_init(hw); 1366 out: 1367 if (ret) 1368 mtk_wed_detach(dev); 1369 unlock: 1370 mutex_unlock(&hw_lock); 1371 1372 return ret; 1373 } 1374 1375 static int 1376 mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) 1377 { 1378 struct mtk_wed_ring *ring = &dev->tx_ring[idx]; 1379 1380 /* 1381 * Tx ring redirection: 1382 * Instead of configuring the WLAN PDMA TX ring directly, the WLAN 1383 * driver allocated DMA ring gets configured into WED MTK_WED_RING_TX(n) 1384 * registers. 1385 * 1386 * WED driver posts its own DMA ring as WLAN PDMA TX and configures it 1387 * into MTK_WED_WPDMA_RING_TX(n) registers. 1388 * It gets filled with packets picked up from WED TX ring and from 1389 * WDMA RX. 1390 */ 1391 1392 if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring))) 1393 return -EINVAL; 1394 1395 if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, 1396 sizeof(*ring->desc), true)) 1397 return -ENOMEM; 1398 1399 if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) 1400 return -ENOMEM; 1401 1402 ring->reg_base = MTK_WED_RING_TX(idx); 1403 ring->wpdma = regs; 1404 1405 /* WED -> WPDMA */ 1406 wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); 1407 wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE); 1408 wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0); 1409 1410 wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, 1411 ring->desc_phys); 1412 wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, 1413 MTK_WED_TX_RING_SIZE); 1414 wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); 1415 1416 return 0; 1417 } 1418 1419 static int 1420 mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) 1421 { 1422 struct mtk_wed_ring *ring = &dev->txfree_ring; 1423 int i, index = dev->hw->version == 1; 1424 1425 /* 1426 * For txfree event handling, the same DMA ring is shared between WED 1427 * and WLAN. The WLAN driver accesses the ring index registers through 1428 * WED 1429 */ 1430 ring->reg_base = MTK_WED_RING_RX(index); 1431 ring->wpdma = regs; 1432 1433 for (i = 0; i < 12; i += 4) { 1434 u32 val = readl(regs + i); 1435 1436 wed_w32(dev, MTK_WED_RING_RX(index) + i, val); 1437 wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val); 1438 } 1439 1440 return 0; 1441 } 1442 1443 static int 1444 mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) 1445 { 1446 struct mtk_wed_ring *ring = &dev->rx_ring[idx]; 1447 1448 if (WARN_ON(idx >= ARRAY_SIZE(dev->rx_ring))) 1449 return -EINVAL; 1450 1451 if (mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE, 1452 sizeof(*ring->desc), false)) 1453 return -ENOMEM; 1454 1455 if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) 1456 return -ENOMEM; 1457 1458 ring->reg_base = MTK_WED_RING_RX_DATA(idx); 1459 ring->wpdma = regs; 1460 ring->flags |= MTK_WED_RING_CONFIGURED; 1461 1462 /* WPDMA -> WED */ 1463 wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); 1464 wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE); 1465 1466 wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE, 1467 ring->desc_phys); 1468 wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT, 1469 MTK_WED_RX_RING_SIZE); 1470 1471 return 0; 1472 } 1473 1474 static u32 1475 mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask) 1476 { 1477 u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; 1478 1479 if (dev->hw->version == 1) 1480 ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR; 1481 else 1482 ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | 1483 MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | 1484 MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | 1485 MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR; 1486 1487 val = wed_r32(dev, MTK_WED_EXT_INT_STATUS); 1488 wed_w32(dev, MTK_WED_EXT_INT_STATUS, val); 1489 val &= ext_mask; 1490 if (!dev->hw->num_flows) 1491 val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; 1492 if (val && net_ratelimit()) 1493 pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val); 1494 1495 val = wed_r32(dev, MTK_WED_INT_STATUS); 1496 val &= mask; 1497 wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */ 1498 1499 return val; 1500 } 1501 1502 static void 1503 mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask) 1504 { 1505 if (!dev->running) 1506 return; 1507 1508 mtk_wed_set_ext_int(dev, !!mask); 1509 wed_w32(dev, MTK_WED_INT_MASK, mask); 1510 } 1511 1512 int mtk_wed_flow_add(int index) 1513 { 1514 struct mtk_wed_hw *hw = hw_list[index]; 1515 int ret; 1516 1517 if (!hw || !hw->wed_dev) 1518 return -ENODEV; 1519 1520 if (hw->num_flows) { 1521 hw->num_flows++; 1522 return 0; 1523 } 1524 1525 mutex_lock(&hw_lock); 1526 if (!hw->wed_dev) { 1527 ret = -ENODEV; 1528 goto out; 1529 } 1530 1531 ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev); 1532 if (!ret) 1533 hw->num_flows++; 1534 mtk_wed_set_ext_int(hw->wed_dev, true); 1535 1536 out: 1537 mutex_unlock(&hw_lock); 1538 1539 return ret; 1540 } 1541 1542 void mtk_wed_flow_remove(int index) 1543 { 1544 struct mtk_wed_hw *hw = hw_list[index]; 1545 1546 if (!hw) 1547 return; 1548 1549 if (--hw->num_flows) 1550 return; 1551 1552 mutex_lock(&hw_lock); 1553 if (!hw->wed_dev) 1554 goto out; 1555 1556 hw->wed_dev->wlan.offload_disable(hw->wed_dev); 1557 mtk_wed_set_ext_int(hw->wed_dev, true); 1558 1559 out: 1560 mutex_unlock(&hw_lock); 1561 } 1562 1563 void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, 1564 void __iomem *wdma, phys_addr_t wdma_phy, 1565 int index) 1566 { 1567 static const struct mtk_wed_ops wed_ops = { 1568 .attach = mtk_wed_attach, 1569 .tx_ring_setup = mtk_wed_tx_ring_setup, 1570 .rx_ring_setup = mtk_wed_rx_ring_setup, 1571 .txfree_ring_setup = mtk_wed_txfree_ring_setup, 1572 .msg_update = mtk_wed_mcu_msg_update, 1573 .start = mtk_wed_start, 1574 .stop = mtk_wed_stop, 1575 .reset_dma = mtk_wed_reset_dma, 1576 .reg_read = wed_r32, 1577 .reg_write = wed_w32, 1578 .irq_get = mtk_wed_irq_get, 1579 .irq_set_mask = mtk_wed_irq_set_mask, 1580 .detach = mtk_wed_detach, 1581 .ppe_check = mtk_wed_ppe_check, 1582 }; 1583 struct device_node *eth_np = eth->dev->of_node; 1584 struct platform_device *pdev; 1585 struct mtk_wed_hw *hw; 1586 struct regmap *regs; 1587 int irq; 1588 1589 if (!np) 1590 return; 1591 1592 pdev = of_find_device_by_node(np); 1593 if (!pdev) 1594 goto err_of_node_put; 1595 1596 get_device(&pdev->dev); 1597 irq = platform_get_irq(pdev, 0); 1598 if (irq < 0) 1599 goto err_put_device; 1600 1601 regs = syscon_regmap_lookup_by_phandle(np, NULL); 1602 if (IS_ERR(regs)) 1603 goto err_put_device; 1604 1605 rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops); 1606 1607 mutex_lock(&hw_lock); 1608 1609 if (WARN_ON(hw_list[index])) 1610 goto unlock; 1611 1612 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 1613 if (!hw) 1614 goto unlock; 1615 1616 hw->node = np; 1617 hw->regs = regs; 1618 hw->eth = eth; 1619 hw->dev = &pdev->dev; 1620 hw->wdma_phy = wdma_phy; 1621 hw->wdma = wdma; 1622 hw->index = index; 1623 hw->irq = irq; 1624 hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1; 1625 1626 if (hw->version == 1) { 1627 hw->mirror = syscon_regmap_lookup_by_phandle(eth_np, 1628 "mediatek,pcie-mirror"); 1629 hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np, 1630 "mediatek,hifsys"); 1631 if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) { 1632 kfree(hw); 1633 goto unlock; 1634 } 1635 1636 if (!index) { 1637 regmap_write(hw->mirror, 0, 0); 1638 regmap_write(hw->mirror, 4, 0); 1639 } 1640 } 1641 1642 mtk_wed_hw_add_debugfs(hw); 1643 1644 hw_list[index] = hw; 1645 1646 mutex_unlock(&hw_lock); 1647 1648 return; 1649 1650 unlock: 1651 mutex_unlock(&hw_lock); 1652 err_put_device: 1653 put_device(&pdev->dev); 1654 err_of_node_put: 1655 of_node_put(np); 1656 } 1657 1658 void mtk_wed_exit(void) 1659 { 1660 int i; 1661 1662 rcu_assign_pointer(mtk_soc_wed_ops, NULL); 1663 1664 synchronize_rcu(); 1665 1666 for (i = 0; i < ARRAY_SIZE(hw_list); i++) { 1667 struct mtk_wed_hw *hw; 1668 1669 hw = hw_list[i]; 1670 if (!hw) 1671 continue; 1672 1673 hw_list[i] = NULL; 1674 debugfs_remove(hw->debugfs_dir); 1675 put_device(hw->dev); 1676 of_node_put(hw->node); 1677 kfree(hw); 1678 } 1679 } 1680