// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 *
 * Contributors:
 *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 */

#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/dma-direction.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/iopoll.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "t7xx_cldma.h"
#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

#define MAX_TX_BUDGET			16
#define MAX_RX_BUDGET			16

#define CHECK_Q_STOP_TIMEOUT_US		1000000
#define CHECK_Q_STOP_STEP_US		10000

#define CLDMA_JUMBO_BUFF_SZ		(63 * 1024 + sizeof(struct ccci_header))

static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
				     enum mtk_txrx tx_rx, unsigned int index)
{
	queue->dir = tx_rx;
	queue->index = index;
	queue->md_ctrl = md_ctrl;
	queue->tr_ring = NULL;
	queue->tr_done = NULL;
	queue->tx_next = NULL;
}

static void md_cd_queue_struct_init(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
				    enum mtk_txrx tx_rx, unsigned int index)
{
	md_cd_queue_struct_reset(queue, md_ctrl, tx_rx, index);
	init_waitqueue_head(&queue->req_wq);
	spin_lock_init(&queue->ring_lock);
}

static void t7xx_cldma_gpd_set_data_ptr(struct cldma_gpd *gpd, dma_addr_t data_ptr)
{
	gpd->data_buff_bd_ptr_h = cpu_to_le32(upper_32_bits(data_ptr));
	gpd->data_buff_bd_ptr_l = cpu_to_le32(lower_32_bits(data_ptr));
}

static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_ptr)
{
	gpd->next_gpd_ptr_h = cpu_to_le32(upper_32_bits(next_ptr));
	gpd->next_gpd_ptr_l = cpu_to_le32(lower_32_bits(next_ptr));
}

static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
					size_t size, gfp_t gfp_mask)
{
	req->skb = __dev_alloc_skb(size, gfp_mask);
	if (!req->skb)
		return -ENOMEM;

	req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) {
		dev_kfree_skb_any(req->skb);
		req->skb = NULL;
		req->mapped_buff = 0;
		dev_err(md_ctrl->dev, "DMA mapping failed\n");
		return -ENOMEM;
	}

	return 0;
}

static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool *over_budget)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
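	/* Give up polling a HW-owned GPD after a bounded number of retries */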
	unsigned int hwo_polling_count = 0;
	struct t7xx_cldma_hw *hw_info;
	bool rx_not_done = true;
	unsigned long flags;
	int count = 0;

	hw_info = &md_ctrl->hw_info;

	do {
		struct cldma_request *req;
		struct cldma_gpd *gpd;
		struct sk_buff *skb;
		int ret;

		req = queue->tr_done;
		if (!req)
			return -ENODATA;

		gpd = req->gpd;
		if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
			dma_addr_t gpd_addr;

			if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) {
				dev_err(md_ctrl->dev, "PCIe Link disconnected\n");
				return -ENODEV;
			}

			gpd_addr = ioread64_lo_hi(hw_info->ap_pdn_base +
						  REG_CLDMA_DL_CURRENT_ADDRL_0 +
						  queue->index * sizeof(u64));
			if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100)
				return 0;

			udelay(1);
			continue;
		}

		hwo_polling_count = 0;
		skb = req->skb;

		if (req->mapped_buff) {
			dma_unmap_single(md_ctrl->dev, req->mapped_buff,
					 queue->tr_ring->pkt_size, DMA_FROM_DEVICE);
			req->mapped_buff = 0;
		}

		skb->len = 0;
		skb_reset_tail_pointer(skb);
		skb_put(skb, le16_to_cpu(gpd->data_buff_len));

		ret = md_ctrl->recv_skb(queue, skb);
		/* Break processing, will try again later */
		if (ret < 0)
			return ret;

		req->skb = NULL;
		t7xx_cldma_gpd_set_data_ptr(gpd, 0);

		spin_lock_irqsave(&queue->ring_lock, flags);
		queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
		spin_unlock_irqrestore(&queue->ring_lock, flags);
		req = queue->rx_refill;

		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL);
		if (ret)
			return ret;

		gpd = req->gpd;
		t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
		gpd->data_buff_len = 0;
		gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;

		spin_lock_irqsave(&queue->ring_lock, flags);
		queue->rx_refill = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
		spin_unlock_irqrestore(&queue->ring_lock, flags);

		rx_not_done = ++count < budget || !need_resched();
	} while (rx_not_done);

	*over_budget = true;
	return 0;
}

static int t7xx_cldma_gpd_rx_collect(struct cldma_queue *queue, int budget)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct t7xx_cldma_hw *hw_info;
	unsigned int pending_rx_int;
	bool over_budget = false;
	unsigned long flags;
	int ret;

	hw_info = &md_ctrl->hw_info;

	do {
		ret = t7xx_cldma_gpd_rx_from_q(queue, budget, &over_budget);
		if (ret == -ENODATA)
			return 0;
		else if (ret)
			return ret;

		pending_rx_int = 0;

		spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
		if (md_ctrl->rxq_active & BIT(queue->index)) {
			if (!t7xx_cldma_hw_queue_status(hw_info, queue->index, MTK_RX))
				t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_RX);

			pending_rx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index),
								  MTK_RX);
			if (pending_rx_int) {
				t7xx_cldma_hw_rx_done(hw_info, pending_rx_int);

				if (over_budget) {
					spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
					return -EAGAIN;
				}
			}
		}
		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
	} while (pending_rx_int);

	return 0;
}

static void t7xx_cldma_rx_done(struct work_struct *work)
{
	struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	int value;

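	/* Collect RX GPDs; if collection stopped early and the queue is still
	 * active, reschedule the work item below.
	 */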
	value = t7xx_cldma_gpd_rx_collect(queue, queue->budget);
	if (value && md_ctrl->rxq_active & BIT(queue->index)) {
		queue_work(queue->worker, &queue->cldma_work);
		return;
	}

	t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info);
	t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX);
	t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX);
	pm_runtime_mark_last_busy(md_ctrl->dev);
	pm_runtime_put_autosuspend(md_ctrl->dev);
}

static int t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	unsigned int dma_len, count = 0;
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	unsigned long flags;
	dma_addr_t dma_free;
	struct sk_buff *skb;

	while (!kthread_should_stop()) {
		spin_lock_irqsave(&queue->ring_lock, flags);
		req = queue->tr_done;
		if (!req) {
			spin_unlock_irqrestore(&queue->ring_lock, flags);
			break;
		}
		gpd = req->gpd;
		if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
			spin_unlock_irqrestore(&queue->ring_lock, flags);
			break;
		}
		queue->budget++;
		dma_free = req->mapped_buff;
		dma_len = le16_to_cpu(gpd->data_buff_len);
		skb = req->skb;
		req->skb = NULL;
		queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
		spin_unlock_irqrestore(&queue->ring_lock, flags);

		count++;
		dma_unmap_single(md_ctrl->dev, dma_free, dma_len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}

	if (count)
		wake_up_nr(&queue->req_wq, count);

	return count;
}

static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct cldma_request *req;
	dma_addr_t ul_curr_addr;
	unsigned long flags;
	bool pending_gpd;

	if (!(md_ctrl->txq_active & BIT(queue->index)))
		return;

	spin_lock_irqsave(&queue->ring_lock, flags);
	req = list_prev_entry_circular(queue->tx_next, &queue->tr_ring->gpd_ring, entry);
	spin_unlock_irqrestore(&queue->ring_lock, flags);

	pending_gpd = (req->gpd->flags & GPD_FLAGS_HWO) && req->skb;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (pending_gpd) {
		struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;

		/* Check current processing TGPD, 64-bit address is in a table by Q index */
		ul_curr_addr = ioread64_lo_hi(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
					      queue->index * sizeof(u64));
		if (req->gpd_addr != ul_curr_addr) {
			spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
			dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n",
				md_ctrl->hif_id, queue->index);
			return;
		}

		t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_TX);
	}
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}

static void t7xx_cldma_tx_done(struct work_struct *work)
{
	struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct t7xx_cldma_hw *hw_info;
	unsigned int l2_tx_int;
	unsigned long flags;

	hw_info = &md_ctrl->hw_info;
	t7xx_cldma_gpd_tx_collect(queue);
	l2_tx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index) | EQ_STA_BIT(queue->index),
					     MTK_TX);
	if (l2_tx_int & EQ_STA_BIT(queue->index)) {
		t7xx_cldma_hw_tx_done(hw_info, EQ_STA_BIT(queue->index));
		t7xx_cldma_txq_empty_hndl(queue);
	}

	if (l2_tx_int & BIT(queue->index)) {
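		/* More TX completions are pending: acknowledge them and re-run the worker */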
		t7xx_cldma_hw_tx_done(hw_info, BIT(queue->index));
		queue_work(queue->worker, &queue->cldma_work);
		return;
	}

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (md_ctrl->txq_active & BIT(queue->index)) {
		t7xx_cldma_clear_ip_busy(hw_info);
		t7xx_cldma_hw_irq_en_eq(hw_info, queue->index, MTK_TX);
		t7xx_cldma_hw_irq_en_txrx(hw_info, queue->index, MTK_TX);
	}
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	pm_runtime_mark_last_busy(md_ctrl->dev);
	pm_runtime_put_autosuspend(md_ctrl->dev);
}

static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl,
				 struct cldma_ring *ring, enum dma_data_direction tx_rx)
{
	struct cldma_request *req_cur, *req_next;

	list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) {
		if (req_cur->mapped_buff && req_cur->skb) {
			dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff,
					 ring->pkt_size, tx_rx);
			req_cur->mapped_buff = 0;
		}

		dev_kfree_skb_any(req_cur->skb);

		if (req_cur->gpd)
			dma_pool_free(md_ctrl->gpd_dmapool, req_cur->gpd, req_cur->gpd_addr);

		list_del(&req_cur->entry);
		kfree(req_cur);
	}
}

static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, size_t pkt_size)
{
	struct cldma_request *req;
	int val;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
	if (!req->gpd)
		goto err_free_req;

	val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL);
	if (val)
		goto err_free_pool;

	return req;

err_free_pool:
	dma_pool_free(md_ctrl->gpd_dmapool, req->gpd, req->gpd_addr);

err_free_req:
	kfree(req);

	return NULL;
}

static int t7xx_cldma_rx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
{
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	int i;

	INIT_LIST_HEAD(&ring->gpd_ring);
	ring->length = MAX_RX_BUDGET;

	for (i = 0; i < ring->length; i++) {
		req = t7xx_alloc_rx_request(md_ctrl, ring->pkt_size);
		if (!req) {
			t7xx_cldma_ring_free(md_ctrl, ring, DMA_FROM_DEVICE);
			return -ENOMEM;
		}

		gpd = req->gpd;
		t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
		gpd->rx_data_allow_len = cpu_to_le16(ring->pkt_size);
		gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
		INIT_LIST_HEAD(&req->entry);
		list_add_tail(&req->entry, &ring->gpd_ring);
	}

	/* Link previous GPD to next GPD, circular */
	list_for_each_entry(req, &ring->gpd_ring, entry) {
		t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
		gpd = req->gpd;
	}

	return 0;
}

static struct cldma_request *t7xx_alloc_tx_request(struct cldma_ctrl *md_ctrl)
{
	struct cldma_request *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
	if (!req->gpd) {
		kfree(req);
		return NULL;
	}

	return req;
}

static int t7xx_cldma_tx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
{
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	int i;

	INIT_LIST_HEAD(&ring->gpd_ring);
	ring->length = MAX_TX_BUDGET;

	for (i = 0; i < ring->length; i++) {
		req = t7xx_alloc_tx_request(md_ctrl);
		if (!req) {
			t7xx_cldma_ring_free(md_ctrl, ring, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		gpd = req->gpd;
		gpd->flags = GPD_FLAGS_IOC;
		INIT_LIST_HEAD(&req->entry);
		list_add_tail(&req->entry, &ring->gpd_ring);
	}

	/* Link previous GPD to next GPD, circular */
	list_for_each_entry(req, &ring->gpd_ring, entry) {
		t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
		gpd = req->gpd;
	}

	return 0;
}

/**
 * t7xx_cldma_q_reset() - Reset CLDMA request pointers to their initial values.
 * @queue: Pointer to the queue structure.
 *
 * Called with ring_lock (unless called during initialization phase)
 */
static void t7xx_cldma_q_reset(struct cldma_queue *queue)
{
	struct cldma_request *req;

	req = list_first_entry(&queue->tr_ring->gpd_ring, struct cldma_request, entry);
	queue->tr_done = req;
	queue->budget = queue->tr_ring->length;

	if (queue->dir == MTK_TX)
		queue->tx_next = req;
	else
		queue->rx_refill = req;
}

static void t7xx_cldma_rxq_init(struct cldma_queue *queue)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;

	queue->dir = MTK_RX;
	queue->tr_ring = &md_ctrl->rx_ring[queue->index];
	t7xx_cldma_q_reset(queue);
}

static void t7xx_cldma_txq_init(struct cldma_queue *queue)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;

	queue->dir = MTK_TX;
	queue->tr_ring = &md_ctrl->tx_ring[queue->index];
	t7xx_cldma_q_reset(queue);
}

static void t7xx_cldma_enable_irq(struct cldma_ctrl *md_ctrl)
{
	t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
}

static void t7xx_cldma_disable_irq(struct cldma_ctrl *md_ctrl)
{
	t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
}

static void t7xx_cldma_irq_work_cb(struct cldma_ctrl *md_ctrl)
{
	unsigned long l2_tx_int_msk, l2_rx_int_msk, l2_tx_int, l2_rx_int, val;
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	int i;

	/* L2 raw interrupt status */
	l2_tx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
	l2_rx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
	l2_tx_int_msk = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TIMR0);
	l2_rx_int_msk = ioread32(hw_info->ap_ao_base + REG_CLDMA_L2RIMR0);
	l2_tx_int &= ~l2_tx_int_msk;
	l2_rx_int &= ~l2_rx_int_msk;

	if (l2_tx_int) {
		if (l2_tx_int & (TQ_ERR_INT_BITMASK | TQ_ACTIVE_START_ERR_INT_BITMASK)) {
			/* Read and clear L3 TX interrupt status */
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
		}

		t7xx_cldma_hw_tx_done(hw_info, l2_tx_int);
		if (l2_tx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
			for_each_set_bit(i, &l2_tx_int, L2_INT_BIT_COUNT) {
				if (i < CLDMA_TXQ_NUM) {
					pm_runtime_get(md_ctrl->dev);
					t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_TX);
					t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_TX);
					queue_work(md_ctrl->txq[i].worker,
						   &md_ctrl->txq[i].cldma_work);
				} else {
					t7xx_cldma_txq_empty_hndl(&md_ctrl->txq[i - CLDMA_TXQ_NUM]);
				}
			}
		}
	}

	if (l2_rx_int) {
		if (l2_rx_int & (RQ_ERR_INT_BITMASK | RQ_ACTIVE_START_ERR_INT_BITMASK)) {
			/* Read and clear L3 RX interrupt status */
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
		}

		t7xx_cldma_hw_rx_done(hw_info, l2_rx_int);
		if (l2_rx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
			l2_rx_int |= l2_rx_int >> CLDMA_RXQ_NUM;
			for_each_set_bit(i, &l2_rx_int, CLDMA_RXQ_NUM) {
				pm_runtime_get(md_ctrl->dev);
				t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_RX);
				t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_RX);
				queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work);
			}
		}
	}
}

static bool t7xx_cldma_qs_are_active(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	unsigned int tx_active;
	unsigned int rx_active;

	if (!pci_device_is_present(to_pci_dev(md_ctrl->dev)))
		return false;

	tx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_TX);
	rx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_RX);

	return tx_active || rx_active;
}

/**
 * t7xx_cldma_stop() - Stop CLDMA.
 * @md_ctrl: CLDMA context structure.
 *
 * Stop TX and RX queues. Disable L1 and L2 interrupts.
 * Clear status registers.
 *
 * Return:
 * * 0		- Success.
 * * -ERROR	- Error code from polling t7xx_cldma_qs_are_active().
 */
int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	bool active;
	int i, ret;

	md_ctrl->rxq_active = 0;
	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
	md_ctrl->txq_active = 0;
	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
	md_ctrl->txq_started = 0;
	t7xx_cldma_disable_irq(md_ctrl);
	t7xx_cldma_hw_stop(hw_info, MTK_RX);
	t7xx_cldma_hw_stop(hw_info, MTK_TX);
	t7xx_cldma_hw_tx_done(hw_info, CLDMA_L2TISAR0_ALL_INT_MASK);
	t7xx_cldma_hw_rx_done(hw_info, CLDMA_L2RISAR0_ALL_INT_MASK);

	if (md_ctrl->is_late_init) {
		for (i = 0; i < CLDMA_TXQ_NUM; i++)
			flush_work(&md_ctrl->txq[i].cldma_work);

		for (i = 0; i < CLDMA_RXQ_NUM; i++)
			flush_work(&md_ctrl->rxq[i].cldma_work);
	}

	ret = read_poll_timeout(t7xx_cldma_qs_are_active, active, !active, CHECK_Q_STOP_STEP_US,
				CHECK_Q_STOP_TIMEOUT_US, true, md_ctrl);
	if (ret)
		dev_err(md_ctrl->dev, "Could not stop CLDMA%d queues", md_ctrl->hif_id);

	return ret;
}

static void t7xx_cldma_late_release(struct cldma_ctrl *md_ctrl)
{
	int i;

	if (!md_ctrl->is_late_init)
		return;

	for (i = 0; i < CLDMA_TXQ_NUM; i++)
		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);

	for (i = 0; i < CLDMA_RXQ_NUM; i++)
		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[i], DMA_FROM_DEVICE);

	dma_pool_destroy(md_ctrl->gpd_dmapool);
	md_ctrl->gpd_dmapool = NULL;
	md_ctrl->is_late_init = false;
}

void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	md_ctrl->txq_active = 0;
	md_ctrl->rxq_active = 0;
	t7xx_cldma_disable_irq(md_ctrl);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
		cancel_work_sync(&md_ctrl->txq[i].cldma_work);

		spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
		md_cd_queue_struct_reset(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
	}

	for (i = 0; i < CLDMA_RXQ_NUM; i++) {
		cancel_work_sync(&md_ctrl->rxq[i].cldma_work);

		spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
		md_cd_queue_struct_reset(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
	}

	t7xx_cldma_late_release(md_ctrl);
}

/**
 * t7xx_cldma_start() - Start CLDMA.
 * @md_ctrl: CLDMA context structure.
 *
 * Set TX/RX start address.
 * Start all RX queues and enable L2 interrupt.
 */
void t7xx_cldma_start(struct cldma_ctrl *md_ctrl)
{
	unsigned long flags;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (md_ctrl->is_late_init) {
		struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
		int i;

		t7xx_cldma_enable_irq(md_ctrl);

		for (i = 0; i < CLDMA_TXQ_NUM; i++) {
			if (md_ctrl->txq[i].tr_done)
				t7xx_cldma_hw_set_start_addr(hw_info, i,
							     md_ctrl->txq[i].tr_done->gpd_addr,
							     MTK_TX);
		}

		for (i = 0; i < CLDMA_RXQ_NUM; i++) {
			if (md_ctrl->rxq[i].tr_done)
				t7xx_cldma_hw_set_start_addr(hw_info, i,
							     md_ctrl->rxq[i].tr_done->gpd_addr,
							     MTK_RX);
		}

		/* Enable L2 interrupt */
		t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
		t7xx_cldma_hw_start(hw_info);
		md_ctrl->txq_started = 0;
		md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
		md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
	}
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}

static void t7xx_cldma_clear_txq(struct cldma_ctrl *md_ctrl, int qnum)
{
	struct cldma_queue *txq = &md_ctrl->txq[qnum];
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	unsigned long flags;

	spin_lock_irqsave(&txq->ring_lock, flags);
	t7xx_cldma_q_reset(txq);
	list_for_each_entry(req, &txq->tr_ring->gpd_ring, entry) {
		gpd = req->gpd;
		gpd->flags &= ~GPD_FLAGS_HWO;
		t7xx_cldma_gpd_set_data_ptr(gpd, 0);
		gpd->data_buff_len = 0;
		dev_kfree_skb_any(req->skb);
		req->skb = NULL;
	}
	spin_unlock_irqrestore(&txq->ring_lock, flags);
}

static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum)
{
	struct cldma_queue *rxq = &md_ctrl->rxq[qnum];
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rxq->ring_lock, flags);
	t7xx_cldma_q_reset(rxq);
	list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
		gpd = req->gpd;
		gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
		gpd->data_buff_len = 0;

		if (req->skb) {
			req->skb->len = 0;
			skb_reset_tail_pointer(req->skb);
		}
	}

	list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
		if (req->skb)
			continue;

		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC);
		if (ret)
			break;

		t7xx_cldma_gpd_set_data_ptr(req->gpd, req->mapped_buff);
	}
	spin_unlock_irqrestore(&rxq->ring_lock, flags);

	return ret;
}

void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
{
	int i;

	if (tx_rx == MTK_TX) {
		for (i = 0; i < CLDMA_TXQ_NUM; i++)
			t7xx_cldma_clear_txq(md_ctrl, i);
	} else {
		for (i = 0; i < CLDMA_RXQ_NUM; i++)
			t7xx_cldma_clear_rxq(md_ctrl, i);
	}
}

void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	unsigned long flags;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
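	/* Mask queue interrupts and mark the direction inactive before stopping the queues */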
	t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, tx_rx);
	t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, tx_rx);
	if (tx_rx == MTK_RX)
		md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
	else
		md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_stop_all_qs(hw_info, tx_rx);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}

static int t7xx_cldma_gpd_handle_tx_request(struct cldma_queue *queue, struct cldma_request *tx_req,
					    struct sk_buff *skb)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct cldma_gpd *gpd = tx_req->gpd;
	unsigned long flags;

	/* Update GPD */
	tx_req->mapped_buff = dma_map_single(md_ctrl->dev, skb->data, skb->len, DMA_TO_DEVICE);

	if (dma_mapping_error(md_ctrl->dev, tx_req->mapped_buff)) {
		dev_err(md_ctrl->dev, "DMA mapping failed\n");
		return -ENOMEM;
	}

	t7xx_cldma_gpd_set_data_ptr(gpd, tx_req->mapped_buff);
	gpd->data_buff_len = cpu_to_le16(skb->len);

	/* This lock must cover TGPD setting, as even without a resume operation,
	 * CLDMA can send next HWO=1 if last TGPD just finished.
	 */
	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (md_ctrl->txq_active & BIT(queue->index))
		gpd->flags |= GPD_FLAGS_HWO;

	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	tx_req->skb = skb;
	return 0;
}

/* Called with cldma_lock */
static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno,
				     struct cldma_request *prev_req)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;

	/* Check whether the device was powered off (CLDMA start address is not set) */
	if (!t7xx_cldma_tx_addr_is_set(hw_info, qno)) {
		t7xx_cldma_hw_init(hw_info);
		t7xx_cldma_hw_set_start_addr(hw_info, qno, prev_req->gpd_addr, MTK_TX);
		md_ctrl->txq_started &= ~BIT(qno);
	}

	if (!t7xx_cldma_hw_queue_status(hw_info, qno, MTK_TX)) {
		if (md_ctrl->txq_started & BIT(qno))
			t7xx_cldma_hw_resume_queue(hw_info, qno, MTK_TX);
		else
			t7xx_cldma_hw_start_queue(hw_info, qno, MTK_TX);

		md_ctrl->txq_started |= BIT(qno);
	}
}

/**
 * t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets.
 * @md_ctrl: CLDMA context structure.
 * @recv_skb: Receiving skb callback.
 */
void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
			     int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb))
{
	md_ctrl->recv_skb = recv_skb;
}

/**
 * t7xx_cldma_send_skb() - Send control data to modem.
 * @md_ctrl: CLDMA context structure.
 * @qno: Queue number.
 * @skb: Socket buffer.
 *
 * Return:
 * * 0		- Success.
 * * -ENOMEM	- Allocation failure.
 * * -EINVAL	- Invalid queue request.
 * * -EIO	- Queue is not active.
 * * -ETIMEDOUT	- Timeout waiting for the device to wake up.
 */
int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb)
{
	struct cldma_request *tx_req;
	struct cldma_queue *queue;
	unsigned long flags;
	int ret;

	if (qno >= CLDMA_TXQ_NUM)
		return -EINVAL;

	ret = pm_runtime_resume_and_get(md_ctrl->dev);
	if (ret < 0 && ret != -EACCES)
		return ret;

	t7xx_pci_disable_sleep(md_ctrl->t7xx_dev);
	queue = &md_ctrl->txq[qno];

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (!(md_ctrl->txq_active & BIT(qno))) {
		ret = -EIO;
		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
		goto allow_sleep;
	}
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	do {
		spin_lock_irqsave(&queue->ring_lock, flags);
		tx_req = queue->tx_next;
		if (queue->budget > 0 && !tx_req->skb) {
			struct list_head *gpd_ring = &queue->tr_ring->gpd_ring;

			queue->budget--;
			t7xx_cldma_gpd_handle_tx_request(queue, tx_req, skb);
			queue->tx_next = list_next_entry_circular(tx_req, gpd_ring, entry);
			spin_unlock_irqrestore(&queue->ring_lock, flags);

			if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
				ret = -ETIMEDOUT;
				break;
			}

			/* Protect the access to the modem for queues operations (resume/start)
			 * which access shared locations by all the queues.
			 * cldma_lock is independent of ring_lock which is per queue.
			 */
			spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
			t7xx_cldma_hw_start_send(md_ctrl, qno, tx_req);
			spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

			break;
		}
		spin_unlock_irqrestore(&queue->ring_lock, flags);

		if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
			ret = -ETIMEDOUT;
			break;
		}

		if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) {
			spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
			t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX);
			spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
		}

		ret = wait_event_interruptible_exclusive(queue->req_wq, queue->budget > 0);
	} while (!ret);

allow_sleep:
	t7xx_pci_enable_sleep(md_ctrl->t7xx_dev);
	pm_runtime_mark_last_busy(md_ctrl->dev);
	pm_runtime_put_autosuspend(md_ctrl->dev);
	return ret;
}

static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
{
	char dma_pool_name[32];
	int i, j, ret;

	if (md_ctrl->is_late_init) {
		dev_err(md_ctrl->dev, "CLDMA late init was already done\n");
		return -EALREADY;
	}

	snprintf(dma_pool_name, sizeof(dma_pool_name), "cldma_req_hif%d", md_ctrl->hif_id);

	md_ctrl->gpd_dmapool = dma_pool_create(dma_pool_name, md_ctrl->dev,
					       sizeof(struct cldma_gpd), GPD_DMAPOOL_ALIGN, 0);
	if (!md_ctrl->gpd_dmapool) {
		dev_err(md_ctrl->dev, "DMA pool alloc fail\n");
		return -ENOMEM;
	}

	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
		ret = t7xx_cldma_tx_ring_init(md_ctrl, &md_ctrl->tx_ring[i]);
		if (ret) {
			dev_err(md_ctrl->dev, "control TX ring init fail\n");
			goto err_free_tx_ring;
		}

		md_ctrl->tx_ring[i].pkt_size = CLDMA_MTU;
	}

	for (j = 0; j < CLDMA_RXQ_NUM; j++) {
		md_ctrl->rx_ring[j].pkt_size = CLDMA_MTU;

		if (j == CLDMA_RXQ_NUM - 1)
			md_ctrl->rx_ring[j].pkt_size = CLDMA_JUMBO_BUFF_SZ;

		ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]);
		if (ret) {
			dev_err(md_ctrl->dev, "Control RX ring init fail\n");
			goto err_free_rx_ring;
		}
	}

	for (i = 0; i < CLDMA_TXQ_NUM; i++)
		t7xx_cldma_txq_init(&md_ctrl->txq[i]);

	for (j = 0; j < CLDMA_RXQ_NUM; j++)
		t7xx_cldma_rxq_init(&md_ctrl->rxq[j]);

	md_ctrl->is_late_init = true;
	return 0;

err_free_rx_ring:
	while (j--)
		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[j], DMA_FROM_DEVICE);

err_free_tx_ring:
	while (i--)
		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);

	return ret;
}

static void __iomem *t7xx_pcie_addr_transfer(void __iomem *addr, u32 addr_trs1, u32 phy_addr)
{
	return addr + phy_addr - addr_trs1;
}

static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_addr_base *pbase = &md_ctrl->t7xx_dev->base_addr;
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	u32 phy_ao_base, phy_pd_base;

	hw_info->hw_mode = MODE_BIT_64;

	if (md_ctrl->hif_id == CLDMA_ID_MD) {
		phy_ao_base = CLDMA1_AO_BASE;
		phy_pd_base = CLDMA1_PD_BASE;
		hw_info->phy_interrupt_id = CLDMA1_INT;
	} else {
		phy_ao_base = CLDMA0_AO_BASE;
		phy_pd_base = CLDMA0_PD_BASE;
		hw_info->phy_interrupt_id = CLDMA0_INT;
	}

	hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
						      pbase->pcie_dev_reg_trsl_addr, phy_ao_base);
	hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
						       pbase->pcie_dev_reg_trsl_addr, phy_pd_base);
}

static int t7xx_cldma_default_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return 0;
}

int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
{
	struct device *dev = &t7xx_dev->pdev->dev;
	struct cldma_ctrl *md_ctrl;

	md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL);
	if (!md_ctrl)
		return -ENOMEM;

	md_ctrl->t7xx_dev = t7xx_dev;
	md_ctrl->dev = dev;
	md_ctrl->hif_id = hif_id;
	md_ctrl->recv_skb = t7xx_cldma_default_recv_skb;
	t7xx_hw_info_init(md_ctrl);
	t7xx_dev->md->md_ctrl[hif_id] = md_ctrl;
	return 0;
}

static void t7xx_cldma_resume_early(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	struct cldma_ctrl *md_ctrl = entity_param;
	struct t7xx_cldma_hw *hw_info;
	unsigned long flags;
	int qno_t;

	hw_info = &md_ctrl->hw_info;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_restore(hw_info);
	for (qno_t = 0; qno_t < CLDMA_TXQ_NUM; qno_t++) {
		t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->txq[qno_t].tx_next->gpd_addr,
					     MTK_TX);
		t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->rxq[qno_t].tr_done->gpd_addr,
					     MTK_RX);
	}
	t7xx_cldma_enable_irq(md_ctrl);
	t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
	md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_irq_en_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
	t7xx_cldma_hw_irq_en_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}

static int t7xx_cldma_resume(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	struct cldma_ctrl *md_ctrl = entity_param;
	unsigned long flags;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
	t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	if (md_ctrl->hif_id == CLDMA_ID_MD)
		t7xx_mhccif_mask_clr(t7xx_dev, D2H_SW_INT_MASK);

	return 0;
}

static void t7xx_cldma_suspend_late(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	struct cldma_ctrl *md_ctrl = entity_param;
	struct t7xx_cldma_hw *hw_info;
	unsigned long flags;

	hw_info = &md_ctrl->hw_info;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
	t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
	md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
	t7xx_cldma_clear_ip_busy(hw_info);
	t7xx_cldma_disable_irq(md_ctrl);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}

static int t7xx_cldma_suspend(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	struct cldma_ctrl *md_ctrl = entity_param;
	struct t7xx_cldma_hw *hw_info;
	unsigned long flags;

	if (md_ctrl->hif_id == CLDMA_ID_MD)
		t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);

	hw_info = &md_ctrl->hw_info;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_TX);
	t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_TX);
	md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
	md_ctrl->txq_started = 0;
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	return 0;
}

static int t7xx_cldma_pm_init(struct cldma_ctrl *md_ctrl)
{
	md_ctrl->pm_entity = kzalloc(sizeof(*md_ctrl->pm_entity), GFP_KERNEL);
	if (!md_ctrl->pm_entity)
		return -ENOMEM;

	md_ctrl->pm_entity->entity_param = md_ctrl;

	if (md_ctrl->hif_id == CLDMA_ID_MD)
		md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL1;
	else
		md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL2;

	md_ctrl->pm_entity->suspend = t7xx_cldma_suspend;
	md_ctrl->pm_entity->suspend_late = t7xx_cldma_suspend_late;
	md_ctrl->pm_entity->resume = t7xx_cldma_resume;
	md_ctrl->pm_entity->resume_early = t7xx_cldma_resume_early;

	return t7xx_pci_pm_entity_register(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
}

static int t7xx_cldma_pm_uninit(struct cldma_ctrl *md_ctrl)
{
	if (!md_ctrl->pm_entity)
		return -EINVAL;

	t7xx_pci_pm_entity_unregister(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
	kfree(md_ctrl->pm_entity);
	md_ctrl->pm_entity = NULL;
	return 0;
}

void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	unsigned long flags;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_stop(hw_info, MTK_TX);
	t7xx_cldma_hw_stop(hw_info, MTK_RX);
	t7xx_cldma_hw_rx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
	t7xx_cldma_hw_tx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
	t7xx_cldma_hw_init(hw_info);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}

static irqreturn_t t7xx_cldma_isr_handler(int irq, void *data)
{
	struct cldma_ctrl *md_ctrl = data;
	u32 interrupt;

	interrupt = md_ctrl->hw_info.phy_interrupt_id;
	t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, interrupt);
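	/* Dispatch the L2 TX/RX interrupt sources to the per-queue workers */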
	t7xx_cldma_irq_work_cb(md_ctrl);
	t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, interrupt);
	t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, interrupt);
	return IRQ_HANDLED;
}

static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl)
{
	int i;

	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
		if (md_ctrl->txq[i].worker) {
			destroy_workqueue(md_ctrl->txq[i].worker);
			md_ctrl->txq[i].worker = NULL;
		}
	}

	for (i = 0; i < CLDMA_RXQ_NUM; i++) {
		if (md_ctrl->rxq[i].worker) {
			destroy_workqueue(md_ctrl->rxq[i].worker);
			md_ctrl->rxq[i].worker = NULL;
		}
	}
}

/**
 * t7xx_cldma_init() - Initialize CLDMA.
 * @md_ctrl: CLDMA context structure.
 *
 * Allocate and initialize device power management entity.
 * Initialize HIF TX/RX queue structure.
 * Register CLDMA callback ISR with PCIe driver.
 *
 * Return:
 * * 0		- Success.
 * * -ERROR	- Error code from failure sub-initializations.
 */
int t7xx_cldma_init(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	int ret, i;

	md_ctrl->txq_active = 0;
	md_ctrl->rxq_active = 0;
	md_ctrl->is_late_init = false;

	ret = t7xx_cldma_pm_init(md_ctrl);
	if (ret)
		return ret;

	spin_lock_init(&md_ctrl->cldma_lock);

	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
		md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
		md_ctrl->txq[i].worker =
			alloc_ordered_workqueue("md_hif%d_tx%d_worker",
						WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI),
						md_ctrl->hif_id, i);
		if (!md_ctrl->txq[i].worker)
			goto err_workqueue;

		INIT_WORK(&md_ctrl->txq[i].cldma_work, t7xx_cldma_tx_done);
	}

	for (i = 0; i < CLDMA_RXQ_NUM; i++) {
		md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
		INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done);

		md_ctrl->rxq[i].worker =
			alloc_ordered_workqueue("md_hif%d_rx%d_worker",
						WQ_MEM_RECLAIM,
						md_ctrl->hif_id, i);
		if (!md_ctrl->rxq[i].worker)
			goto err_workqueue;
	}

	t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
	md_ctrl->t7xx_dev->intr_handler[hw_info->phy_interrupt_id] = t7xx_cldma_isr_handler;
	md_ctrl->t7xx_dev->intr_thread[hw_info->phy_interrupt_id] = NULL;
	md_ctrl->t7xx_dev->callback_param[hw_info->phy_interrupt_id] = md_ctrl;
	t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
	return 0;

err_workqueue:
	t7xx_cldma_destroy_wqs(md_ctrl);
	t7xx_cldma_pm_uninit(md_ctrl);
	return -ENOMEM;
}

void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl)
{
	t7xx_cldma_late_release(md_ctrl);
	t7xx_cldma_late_init(md_ctrl);
}

void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl)
{
	t7xx_cldma_stop(md_ctrl);
	t7xx_cldma_late_release(md_ctrl);
	t7xx_cldma_destroy_wqs(md_ctrl);
	t7xx_cldma_pm_uninit(md_ctrl);
}