// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/debugfs.h>
#include <linux/soc/mediatek/mtk_wed.h>
#include "mtk_eth_soc.h"
#include "mtk_wed_regs.h"
#include "mtk_wed.h"
#include "mtk_ppe.h"

#define MTK_PCIE_BASE(n)		(0x1a143000 + (n) * 0x2000)

#define MTK_WED_PKT_SIZE		1900
#define MTK_WED_BUF_SIZE		2048
#define MTK_WED_BUF_PER_PAGE		(PAGE_SIZE / 2048)

#define MTK_WED_TX_RING_SIZE		2048
#define MTK_WED_WDMA_RING_SIZE		1024
#define MTK_WED_MAX_GROUP_SIZE		0x100
#define MTK_WED_VLD_GROUP_SIZE		0x40
#define MTK_WED_PER_GROUP_PKT		128

#define MTK_WED_FBUF_SIZE		128

static struct mtk_wed_hw *hw_list[2];
static DEFINE_MUTEX(hw_lock);

static void
wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
	regmap_update_bits(dev->hw->regs, reg, mask | val, val);
}

static void
wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	wed_m32(dev, reg, 0, mask);
}

static void
wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	wed_m32(dev, reg, mask, 0);
}

static void
wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
	wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
}

static void
wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	wdma_m32(dev, reg, 0, mask);
}

static u32
mtk_wed_read_reset(struct mtk_wed_device *dev)
{
	return wed_r32(dev, MTK_WED_RESET);
}

static void
mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
{
	u32 status;

	wed_w32(dev, MTK_WED_RESET, mask);
	if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
			       !(status & mask), 0, 1000))
		WARN_ON_ONCE(1);
}

/* Assign a free WED core to a WLAN device. On version 1 hardware a PCIe
 * device is tied to the core matching its PCIe domain; version 2 cores
 * have no slot restriction, so any unused core can be picked.
 */
static struct mtk_wed_hw *
mtk_wed_assign(struct mtk_wed_device *dev)
{
	struct mtk_wed_hw *hw;
	int i;

	if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
		hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
		if (!hw)
			return NULL;

		if (!hw->wed_dev)
			goto out;

		if (hw->version == 1)
			return NULL;

		/* MT7986 WED devices do not have any pcie slot restrictions */
	}
	/* MT7986 PCIE or AXI */
	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
		hw = hw_list[i];
		if (hw && !hw->wed_dev)
			goto out;
	}

	return NULL;

out:
	hw->wed_dev = dev;
	return hw;
}

/* Allocate the TX buffer pool handed over to the WED hardware: one
 * mtk_wdma_desc per MTK_WED_BUF_SIZE buffer, with MTK_WED_BUF_PER_PAGE
 * buffers carved out of every mapped page.
 */
static int
mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
{
	struct mtk_wdma_desc *desc;
	dma_addr_t desc_phys;
	void **page_list;
	int token = dev->wlan.token_start;
	int ring_size;
	int n_pages;
	int i, page_idx;

	ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
	n_pages = ring_size / MTK_WED_BUF_PER_PAGE;

	page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	dev->buf_ring.size = ring_size;
	dev->buf_ring.pages = page_list;

	desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
				  &desc_phys, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	dev->buf_ring.desc = desc;
	dev->buf_ring.desc_phys = desc_phys;

	for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
		dma_addr_t page_phys, buf_phys;
		struct page *page;
		void *buf;
		int s;

		page = __dev_alloc_pages(GFP_KERNEL, 0);
		if (!page)
			return -ENOMEM;

		page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev->hw->dev, page_phys)) {
			__free_page(page);
			return -ENOMEM;
		}

		page_list[page_idx++] = page;
		dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
					DMA_BIDIRECTIONAL);

		buf = page_to_virt(page);
		buf_phys = page_phys;

		for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
			u32 txd_size;
			u32 ctrl;

			txd_size = dev->wlan.init_buf(buf, buf_phys, token++);

			desc->buf0 = cpu_to_le32(buf_phys);
			desc->buf1 = cpu_to_le32(buf_phys + txd_size);

			if (dev->hw->version == 1)
				ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
				       FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
						  MTK_WED_BUF_SIZE - txd_size) |
				       MTK_WDMA_DESC_CTRL_LAST_SEG1;
			else
				ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
				       FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
						  MTK_WED_BUF_SIZE - txd_size) |
				       MTK_WDMA_DESC_CTRL_LAST_SEG0;
			desc->ctrl = cpu_to_le32(ctrl);
			desc->info = 0;
			desc++;

			buf += MTK_WED_BUF_SIZE;
			buf_phys += MTK_WED_BUF_SIZE;
		}

		dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
					   DMA_BIDIRECTIONAL);
	}

	return 0;
}

static void
mtk_wed_free_buffer(struct mtk_wed_device *dev)
{
	struct mtk_wdma_desc *desc = dev->buf_ring.desc;
	void **page_list = dev->buf_ring.pages;
	int page_idx;
	int i;

	if (!page_list)
		return;

	if (!desc)
		goto free_pagelist;

	for (i = 0, page_idx = 0; i < dev->buf_ring.size;
	     i += MTK_WED_BUF_PER_PAGE) {
		void *page = page_list[page_idx++];
		dma_addr_t buf_addr;

		if (!page)
			break;

		buf_addr = le32_to_cpu(desc[i].buf0);
		dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(page);
	}

	dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc),
			  desc, dev->buf_ring.desc_phys);

free_pagelist:
	kfree(page_list);
}

static void
mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
{
	if (!ring->desc)
		return;

	dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size,
			  ring->desc, ring->desc_phys);
}

static void
mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
		mtk_wed_free_ring(dev, &dev->tx_ring[i]);
	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
		mtk_wed_free_ring(dev, &dev->tx_wdma[i]);
}

static void
mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
{
	u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;

	if (dev->hw->version == 1)
		mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
	else
		mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
			MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
			MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
			MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;

	if (!dev->hw->num_flows)
		mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;

	wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
	wed_r32(dev, MTK_WED_EXT_INT_MASK);
}

static void
mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
{
	if (enable) {
		wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
		wed_w32(dev, MTK_WED_TXP_DW1,
			FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
	} else {
		wed_w32(dev, MTK_WED_TXP_DW1,
			FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
		wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
	}
}

static void
mtk_wed_dma_disable(struct mtk_wed_device *dev)
{
	wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);

	wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);

	wed_clr(dev, MTK_WED_GLO_CFG,
		MTK_WED_GLO_CFG_TX_DMA_EN |
		MTK_WED_GLO_CFG_RX_DMA_EN);

	wdma_m32(dev, MTK_WDMA_GLO_CFG,
		 MTK_WDMA_GLO_CFG_TX_DMA_EN |
		 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
		 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES, 0);

	if (dev->hw->version == 1) {
		regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
		wdma_m32(dev, MTK_WDMA_GLO_CFG,
			 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0);
	} else {
		wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);

		mtk_wed_set_512_support(dev, false);
	}
}

static void
mtk_wed_stop(struct mtk_wed_device *dev)
{
	mtk_wed_dma_disable(dev);
	mtk_wed_set_ext_int(dev, false);

	wed_clr(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WED_TX_BM_EN |
		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
	wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
	wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
	wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
}

static void
mtk_wed_detach(struct mtk_wed_device *dev)
{
	struct mtk_wed_hw *hw = dev->hw;

	mutex_lock(&hw_lock);

	mtk_wed_stop(dev);

	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);

	mtk_wed_reset(dev, MTK_WED_RESET_WED);

	mtk_wed_free_buffer(dev);
	mtk_wed_free_tx_rings(dev);

	if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
		struct device_node *wlan_node;

		wlan_node = dev->wlan.pci_dev->dev.of_node;
		if (of_dma_is_coherent(wlan_node) && hw->hifsys)
			regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
					   BIT(hw->index), BIT(hw->index));
	}

	if (!hw_list[!hw->index]->wed_dev &&
	    hw->eth->dma_dev != hw->eth->dev)
		mtk_eth_set_dma_device(hw->eth, hw->eth->dev);

	memset(dev, 0, sizeof(*dev));
	module_put(THIS_MODULE);

	hw->wed_dev = NULL;
	mutex_unlock(&hw_lock);
}

#define PCIE_BASE_ADDR0		0x11280000
static void
mtk_wed_bus_init(struct mtk_wed_device *dev)
{
	switch (dev->wlan.bus_type) {
	case MTK_WED_BUS_PCIE: {
		struct device_node *np = dev->hw->eth->dev->of_node;
		struct regmap *regs;

		regs = syscon_regmap_lookup_by_phandle(np,
						       "mediatek,wed-pcie");
		if (IS_ERR(regs))
			break;

		regmap_update_bits(regs, 0, BIT(0), BIT(0));

		wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
			FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));

		/* pcie interrupt control: pola/source selection */
		wed_set(dev, MTK_WED_PCIE_INT_CTRL,
			MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
			FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
		wed_r32(dev, MTK_WED_PCIE_INT_CTRL);

		wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
		wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);

		/* pcie interrupt status trigger register */
		wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
		wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);

		/* pola setting */
		wed_set(dev, MTK_WED_PCIE_INT_CTRL,
			MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
		break;
	}
	case MTK_WED_BUS_AXI:
		wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
			MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
		break;
	default:
		break;
	}
}

static void
mtk_wed_set_wpdma(struct mtk_wed_device *dev)
{
	if (dev->hw->version == 1) {
		wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
	} else {
		mtk_wed_bus_init(dev);

		wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
		wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
		wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
		wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
	}
}

static void
mtk_wed_hw_init_early(struct mtk_wed_device *dev)
{
	u32 mask, set;

	mtk_wed_stop(dev);
	mtk_wed_reset(dev, MTK_WED_RESET_WED);
	mtk_wed_set_wpdma(dev);

	mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
	       MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
	       MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
	set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
	      MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
	      MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
	wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);

	if (dev->hw->version == 1) {
		u32 offset = dev->hw->index ? 0x04000400 : 0;

		wdma_set(dev, MTK_WDMA_GLO_CFG,
			 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
			 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
			 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);

		wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
		wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
		wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
			MTK_PCIE_BASE(dev->hw->index));
	} else {
		wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy);
		wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT);
		wed_w32(dev, MTK_WED_WDMA_OFFSET0,
			FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS,
				   MTK_WDMA_INT_STATUS) |
			FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG,
				   MTK_WDMA_GLO_CFG));

		wed_w32(dev, MTK_WED_WDMA_OFFSET1,
			FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL,
				   MTK_WDMA_RING_TX(0)) |
			FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
				   MTK_WDMA_RING_RX(0)));
	}
}

static void
mtk_wed_hw_init(struct mtk_wed_device *dev)
{
	if (dev->init_done)
		return;

	dev->init_done = true;
	mtk_wed_set_ext_int(dev, false);
	wed_w32(dev, MTK_WED_TX_BM_CTRL,
		MTK_WED_TX_BM_CTRL_PAUSE |
		FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
			   dev->buf_ring.size / 128) |
		FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
			   MTK_WED_TX_RING_SIZE / 256));

	wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);

	wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);

	if (dev->hw->version == 1) {
		wed_w32(dev, MTK_WED_TX_BM_TKID,
			FIELD_PREP(MTK_WED_TX_BM_TKID_START,
				   dev->wlan.token_start) |
			FIELD_PREP(MTK_WED_TX_BM_TKID_END,
				   dev->wlan.token_start +
				   dev->wlan.nbuf - 1));
		wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
			FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
			MTK_WED_TX_BM_DYN_THR_HI);
	} else {
		wed_w32(dev, MTK_WED_TX_BM_TKID_V2,
			FIELD_PREP(MTK_WED_TX_BM_TKID_START,
				   dev->wlan.token_start) |
			FIELD_PREP(MTK_WED_TX_BM_TKID_END,
				   dev->wlan.token_start +
				   dev->wlan.nbuf - 1));
		wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
			FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
			MTK_WED_TX_BM_DYN_THR_HI_V2);
		wed_w32(dev, MTK_WED_TX_TKID_CTRL,
			MTK_WED_TX_TKID_CTRL_PAUSE |
			FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
				   dev->buf_ring.size / 128) |
			FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
				   dev->buf_ring.size / 128));
		wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
			FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
			MTK_WED_TX_TKID_DYN_THR_HI);
	}

	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);

	if (dev->hw->version == 1)
		wed_set(dev, MTK_WED_CTRL,
			MTK_WED_CTRL_WED_TX_BM_EN |
			MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
	else
		wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);

	wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
}

static void
mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size)
{
	void *head = (void *)ring->desc;
	int i;

	for (i = 0; i < size; i++) {
		struct mtk_wdma_desc *desc;

		desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size);
		desc->buf0 = 0;
		desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
		desc->buf1 = 0;
		desc->info = 0;
	}
}

static u32
mtk_wed_check_busy(struct mtk_wed_device *dev)
{
	if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
		return true;

	if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
	    MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
		return true;

	if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
		return true;

	if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
	    MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
		return true;

	if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
	    MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
		return true;

	if (wed_r32(dev, MTK_WED_CTRL) &
	    (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
		return true;

	return false;
}

static int
mtk_wed_poll_busy(struct mtk_wed_device *dev)
{
	int sleep = 15000;
	int timeout = 100 * sleep;
	u32 val;

	return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
				 timeout, false, dev);
}

static void
mtk_wed_reset_dma(struct mtk_wed_device *dev)
{
	bool busy = false;
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
		if (!dev->tx_ring[i].desc)
			continue;

		mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE);
	}

	if (mtk_wed_poll_busy(dev))
		busy = mtk_wed_check_busy(dev);

	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
	} else {
		wed_w32(dev, MTK_WED_RESET_IDX,
			MTK_WED_RESET_IDX_TX |
			MTK_WED_RESET_IDX_RX);
		wed_w32(dev, MTK_WED_RESET_IDX, 0);
	}

	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);

	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
	} else {
		wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
			MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
		wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);

		wed_set(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);

		wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
	}

	for (i = 0; i < 100; i++) {
		val = wed_r32(dev, MTK_WED_TX_BM_INTF);
		if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
			break;
	}

	mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);

	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
	} else {
		wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
			MTK_WED_WPDMA_RESET_IDX_TX |
			MTK_WED_WPDMA_RESET_IDX_RX);
		wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
	}
}

static int
mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
		   int size, u32 desc_size)
{
	ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size,
					&ring->desc_phys, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_size = desc_size;
	ring->size = size;
	mtk_wed_ring_reset(ring, size);

	return 0;
}

static int
mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
{
	u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
	struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];

	if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size))
		return -ENOMEM;

	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
		 wdma->desc_phys);
	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
		 size);
	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);

	wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
		wdma->desc_phys);
	wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
		size);

	return 0;
}

static void
mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
{
	u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));

	/* wed control cr set */
	wed_set(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WED_TX_BM_EN |
		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

	if (dev->hw->version == 1) {
		wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
			MTK_WED_PCIE_INT_TRIGGER_STATUS);

		wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
			MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
			MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);

		wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
	} else {
		/* initial tx interrupt trigger */
		wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
			MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
			MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
			MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN |
			MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG,
				   dev->wlan.tx_tbit[0]) |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
				   dev->wlan.tx_tbit[1]));

		/* initial txfree interrupt trigger */
		wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
			MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
			MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
				   dev->wlan.txfree_tbit));

		wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
		wed_set(dev, MTK_WED_WDMA_INT_CTRL,
			FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
				   dev->wdma_idx));
	}

	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);

	wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
	wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
	wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
}

static void
mtk_wed_dma_enable(struct mtk_wed_device *dev)
{
	wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);

	wed_set(dev, MTK_WED_GLO_CFG,
		MTK_WED_GLO_CFG_TX_DMA_EN |
		MTK_WED_GLO_CFG_RX_DMA_EN);
	wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
	wed_set(dev, MTK_WED_WDMA_GLO_CFG,
		MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);

	wdma_set(dev, MTK_WDMA_GLO_CFG,
		 MTK_WDMA_GLO_CFG_TX_DMA_EN |
		 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
		 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);

	if (dev->hw->version == 1) {
		wdma_set(dev, MTK_WDMA_GLO_CFG,
			 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
	} else {
		wed_set(dev, MTK_WED_WPDMA_CTRL,
			MTK_WED_WPDMA_CTRL_SDL1_FIXED);

		wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);

		wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
			MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
			MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
	}
}

static void
mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
		if (!dev->tx_wdma[i].desc)
			mtk_wed_wdma_ring_setup(dev, i, 16);

	mtk_wed_hw_init(dev);
	mtk_wed_configure_irq(dev, irq_mask);

	mtk_wed_set_ext_int(dev, true);

	if (dev->hw->version == 1) {
		u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
			  FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
				     dev->hw->index);

		val |= BIT(0) | (BIT(1) * !!dev->hw->index);
		regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
	} else {
		mtk_wed_set_512_support(dev, true);
	}

	mtk_wed_dma_enable(dev);
	dev->running = true;
}

static int
mtk_wed_attach(struct mtk_wed_device *dev)
	__releases(RCU)
{
	struct mtk_wed_hw *hw;
	struct device *device;
	int ret = 0;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "mtk_wed_attach without holding the RCU read lock");

	if ((dev->wlan.bus_type == MTK_WED_BUS_PCIE &&
	     pci_domain_nr(dev->wlan.pci_dev->bus) > 1) ||
	    !try_module_get(THIS_MODULE))
		ret = -ENODEV;

	rcu_read_unlock();

	if (ret)
		return ret;

	mutex_lock(&hw_lock);

	hw = mtk_wed_assign(dev);
	if (!hw) {
		module_put(THIS_MODULE);
		ret = -ENODEV;
		goto out;
	}

	device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
		 ? &dev->wlan.pci_dev->dev
		 : &dev->wlan.platform_dev->dev;
	dev_info(device, "attaching wed device %d version %d\n",
		 hw->index, hw->version);

	dev->hw = hw;
	dev->dev = hw->dev;
	dev->irq = hw->irq;
	dev->wdma_idx = hw->index;

	if (hw->eth->dma_dev == hw->eth->dev &&
	    of_dma_is_coherent(hw->eth->dev->of_node))
		mtk_eth_set_dma_device(hw->eth, hw->dev);

	ret = mtk_wed_buffer_alloc(dev);
	if (ret) {
		mtk_wed_detach(dev);
		goto out;
	}

	mtk_wed_hw_init_early(dev);
	if (hw->hifsys)
		regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
				   BIT(hw->index), 0);

out:
	mutex_unlock(&hw_lock);

	return ret;
}

static int
mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
{
	struct mtk_wed_ring *ring = &dev->tx_ring[idx];

	/*
	 * Tx ring redirection:
	 * Instead of configuring the WLAN PDMA TX ring directly, the
	 * WLAN driver-allocated DMA ring gets configured into the WED
	 * MTK_WED_RING_TX(n) registers.
	 *
	 * The WED driver posts its own DMA ring as WLAN PDMA TX and configures
	 * it into the MTK_WED_WPDMA_RING_TX(n) registers.
	 * It gets filled with packets picked up from the WED TX ring and from
	 * WDMA RX.
	 */

	BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));

	if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
			       sizeof(*ring->desc)))
		return -ENOMEM;

	if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
		return -ENOMEM;

	ring->reg_base = MTK_WED_RING_TX(idx);
	ring->wpdma = regs;

	/* WED -> WPDMA */
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);

	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
		ring->desc_phys);
	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
		MTK_WED_TX_RING_SIZE);
	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);

	return 0;
}

static int
mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
{
	struct mtk_wed_ring *ring = &dev->txfree_ring;
	int i, index = dev->hw->version == 1;

	/*
	 * For txfree event handling, the same DMA ring is shared between WED
	 * and WLAN. The WLAN driver accesses the ring index registers through
	 * WED.
	 */
	ring->reg_base = MTK_WED_RING_RX(index);
	ring->wpdma = regs;

	for (i = 0; i < 12; i += 4) {
		u32 val = readl(regs + i);

		wed_w32(dev, MTK_WED_RING_RX(index) + i, val);
		wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val);
	}

	return 0;
}

static u32
mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
{
	u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;

	if (dev->hw->version == 1)
		ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
	else
		ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
			    MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
			    MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
			    MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;

	val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
	wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
	val &= ext_mask;
	if (!dev->hw->num_flows)
		val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
	if (val && net_ratelimit())
		pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);

	val = wed_r32(dev, MTK_WED_INT_STATUS);
	val &= mask;
	wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */

	return val;
}

static void
mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
{
	if (!dev->running)
		return;

	mtk_wed_set_ext_int(dev, !!mask);
	wed_w32(dev, MTK_WED_INT_MASK, mask);
}

int mtk_wed_flow_add(int index)
{
	struct mtk_wed_hw *hw = hw_list[index];
	int ret;

	if (!hw || !hw->wed_dev)
		return -ENODEV;

	if (hw->num_flows) {
		hw->num_flows++;
		return 0;
	}

	mutex_lock(&hw_lock);
	if (!hw->wed_dev) {
		ret = -ENODEV;
		goto out;
	}

	ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
	if (!ret)
		hw->num_flows++;
	mtk_wed_set_ext_int(hw->wed_dev, true);

out:
	mutex_unlock(&hw_lock);

	return ret;
}

void mtk_wed_flow_remove(int index)
{
	struct mtk_wed_hw *hw = hw_list[index];

	if (!hw)
		return;

	if (--hw->num_flows)
		return;

	mutex_lock(&hw_lock);
	if (!hw->wed_dev)
		goto out;

	hw->wed_dev->wlan.offload_disable(hw->wed_dev);
	mtk_wed_set_ext_int(hw->wed_dev, true);

out:
	mutex_unlock(&hw_lock);
}

void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
		    void __iomem *wdma, phys_addr_t wdma_phy,
		    int index)
{
	static const struct mtk_wed_ops wed_ops = {
		.attach = mtk_wed_attach,
		.tx_ring_setup = mtk_wed_tx_ring_setup,
		.txfree_ring_setup = mtk_wed_txfree_ring_setup,
		.start = mtk_wed_start,
		.stop = mtk_wed_stop,
		.reset_dma = mtk_wed_reset_dma,
		.reg_read = wed_r32,
		.reg_write = wed_w32,
		.irq_get = mtk_wed_irq_get,
		.irq_set_mask = mtk_wed_irq_set_mask,
		.detach = mtk_wed_detach,
	};
	struct device_node *eth_np = eth->dev->of_node;
	struct platform_device *pdev;
	struct mtk_wed_hw *hw;
	struct regmap *regs;
	int irq;

	if (!np)
		return;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		goto err_of_node_put;

	get_device(&pdev->dev);
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		goto err_put_device;

	regs = syscon_regmap_lookup_by_phandle(np, NULL);
	if (IS_ERR(regs))
		goto err_put_device;

	rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);

	mutex_lock(&hw_lock);

	if (WARN_ON(hw_list[index]))
		goto unlock;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		goto unlock;

	hw->node = np;
	hw->regs = regs;
	hw->eth = eth;
	hw->dev = &pdev->dev;
	hw->wdma_phy = wdma_phy;
	hw->wdma = wdma;
	hw->index = index;
	hw->irq = irq;
	hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;

	if (hw->version == 1) {
		hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
				"mediatek,pcie-mirror");
		hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
				"mediatek,hifsys");
		if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
			kfree(hw);
			goto unlock;
		}

		if (!index) {
			regmap_write(hw->mirror, 0, 0);
			regmap_write(hw->mirror, 4, 0);
		}
	}

	mtk_wed_hw_add_debugfs(hw);

	hw_list[index] = hw;

	mutex_unlock(&hw_lock);

	return;

unlock:
	mutex_unlock(&hw_lock);
err_put_device:
	put_device(&pdev->dev);
err_of_node_put:
	of_node_put(np);
}

void mtk_wed_exit(void)
{
	int i;

	rcu_assign_pointer(mtk_soc_wed_ops, NULL);

	synchronize_rcu();

	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
		struct mtk_wed_hw *hw;

		hw = hw_list[i];
		if (!hw)
			continue;

		hw_list[i] = NULL;
		debugfs_remove(hw->debugfs_dir);
		put_device(hw->dev);
		of_node_put(hw->node);
		kfree(hw);
	}
}
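
/* Consumer-side sketch (not part of this driver): roughly how a WLAN driver
 * is expected to bind to one of the cores registered above, assuming the
 * mtk_wed_device_* wrappers from include/linux/soc/mediatek/mtk_wed.h
 * (mtk_wed_device_attach/tx_ring_setup/txfree_ring_setup/start/active/detach).
 * "wed", "tx_ring_regs", "txfree_regs" and "irq_mask" are hypothetical
 * placeholders owned by the WLAN driver.
 *
 *	wed->wlan.bus_type = MTK_WED_BUS_PCIE;		// or MTK_WED_BUS_AXI
 *	// fill in wlan.pci_dev/platform_dev, wpdma_*, token_start, nbuf,
 *	// init_buf, tx_tbit[], txfree_tbit, offload_enable/disable, ...
 *
 *	if (mtk_wed_device_attach(wed) == 0) {
 *		// attach releases the RCU read lock taken by the wrapper,
 *		// see the __releases(RCU) annotation on mtk_wed_attach()
 *		mtk_wed_device_tx_ring_setup(wed, 0, tx_ring_regs);
 *		mtk_wed_device_txfree_ring_setup(wed, txfree_regs);
 *		mtk_wed_device_start(wed, irq_mask);
 *	}
 *
 *	// on teardown
 *	if (mtk_wed_device_active(wed))
 *		mtk_wed_device_detach(wed);
 */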