1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * mtu3_qmu.c - Queue Management Unit driver for device controller 4 * 5 * Copyright (C) 2016 MediaTek Inc. 6 * 7 * Author: Chunfeng Yun <chunfeng.yun@mediatek.com> 8 */ 9 10 /* 11 * Queue Management Unit (QMU) is designed to unload SW effort 12 * to serve DMA interrupts. 13 * By preparing General Purpose Descriptor (GPD) and Buffer Descriptor (BD), 14 * SW links data buffers and triggers QMU to send / receive data to 15 * host / from device at a time. 16 * And now only GPD is supported. 17 * 18 * For more detailed information, please refer to QMU Programming Guide 19 */ 20 21 #include <linux/dmapool.h> 22 #include <linux/iopoll.h> 23 24 #include "mtu3.h" 25 26 #define QMU_CHECKSUM_LEN 16 27 28 #define GPD_FLAGS_HWO BIT(0) 29 #define GPD_FLAGS_BDP BIT(1) 30 #define GPD_FLAGS_BPS BIT(2) 31 #define GPD_FLAGS_IOC BIT(7) 32 #define GET_GPD_HWO(gpd) (le32_to_cpu((gpd)->dw0_info) & GPD_FLAGS_HWO) 33 34 #define GPD_RX_BUF_LEN(x) (((x) & 0xffff) << 16) 35 #define GPD_DATA_LEN(x) ((x) & 0xffff) 36 #define GPD_EXT_FLAG_ZLP BIT(29) 37 #define GPD_EXT_NGP(x) (((x) & 0xf) << 20) 38 #define GPD_EXT_BUF(x) (((x) & 0xf) << 16) 39 40 #define HILO_GEN64(hi, lo) (((u64)(hi) << 32) + (lo)) 41 #define HILO_DMA(hi, lo) \ 42 ((dma_addr_t)HILO_GEN64((le32_to_cpu(hi)), (le32_to_cpu(lo)))) 43 44 static dma_addr_t read_txq_cur_addr(void __iomem *mbase, u8 epnum) 45 { 46 u32 txcpr; 47 u32 txhiar; 48 49 txcpr = mtu3_readl(mbase, USB_QMU_TQCPR(epnum)); 50 txhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum)); 51 52 return HILO_DMA(QMU_CUR_GPD_ADDR_HI(txhiar), txcpr); 53 } 54 55 static dma_addr_t read_rxq_cur_addr(void __iomem *mbase, u8 epnum) 56 { 57 u32 rxcpr; 58 u32 rxhiar; 59 60 rxcpr = mtu3_readl(mbase, USB_QMU_RQCPR(epnum)); 61 rxhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum)); 62 63 return HILO_DMA(QMU_CUR_GPD_ADDR_HI(rxhiar), rxcpr); 64 } 65 66 static void write_txq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma) 67 { 68 u32 tqhiar; 69 70 mtu3_writel(mbase, USB_QMU_TQSAR(epnum), 71 cpu_to_le32(lower_32_bits(dma))); 72 tqhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum)); 73 tqhiar &= ~QMU_START_ADDR_HI_MSK; 74 tqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma)); 75 mtu3_writel(mbase, USB_QMU_TQHIAR(epnum), tqhiar); 76 } 77 78 static void write_rxq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma) 79 { 80 u32 rqhiar; 81 82 mtu3_writel(mbase, USB_QMU_RQSAR(epnum), 83 cpu_to_le32(lower_32_bits(dma))); 84 rqhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum)); 85 rqhiar &= ~QMU_START_ADDR_HI_MSK; 86 rqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma)); 87 mtu3_writel(mbase, USB_QMU_RQHIAR(epnum), rqhiar); 88 } 89 90 static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring, 91 dma_addr_t dma_addr) 92 { 93 dma_addr_t dma_base = ring->dma; 94 struct qmu_gpd *gpd_head = ring->start; 95 u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head); 96 97 if (offset >= MAX_GPD_NUM) 98 return NULL; 99 100 return gpd_head + offset; 101 } 102 103 static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring, 104 struct qmu_gpd *gpd) 105 { 106 dma_addr_t dma_base = ring->dma; 107 struct qmu_gpd *gpd_head = ring->start; 108 u32 offset; 109 110 offset = gpd - gpd_head; 111 if (offset >= MAX_GPD_NUM) 112 return 0; 113 114 return dma_base + (offset * sizeof(*gpd)); 115 } 116 117 static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd) 118 { 119 ring->start = gpd; 120 ring->enqueue = gpd; 121 ring->dequeue = gpd; 122 ring->end = gpd + MAX_GPD_NUM - 1; 123 } 124 125 static void reset_gpd_list(struct mtu3_ep *mep) 126 { 127 struct mtu3_gpd_ring *ring = &mep->gpd_ring; 128 struct qmu_gpd *gpd = ring->start; 129 130 if (gpd) { 131 gpd->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO); 132 gpd_ring_init(ring, gpd); 133 } 134 } 135 136 int mtu3_gpd_ring_alloc(struct mtu3_ep *mep) 137 { 138 struct qmu_gpd *gpd; 139 struct mtu3_gpd_ring *ring = &mep->gpd_ring; 140 141 /* software own all gpds as default */ 142 gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma); 143 if (gpd == NULL) 144 return -ENOMEM; 145 146 gpd_ring_init(ring, gpd); 147 148 return 0; 149 } 150 151 void mtu3_gpd_ring_free(struct mtu3_ep *mep) 152 { 153 struct mtu3_gpd_ring *ring = &mep->gpd_ring; 154 155 dma_pool_free(mep->mtu->qmu_gpd_pool, 156 ring->start, ring->dma); 157 memset(ring, 0, sizeof(*ring)); 158 } 159 160 void mtu3_qmu_resume(struct mtu3_ep *mep) 161 { 162 struct mtu3 *mtu = mep->mtu; 163 void __iomem *mbase = mtu->mac_base; 164 int epnum = mep->epnum; 165 u32 offset; 166 167 offset = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum); 168 169 mtu3_writel(mbase, offset, QMU_Q_RESUME); 170 if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE)) 171 mtu3_writel(mbase, offset, QMU_Q_RESUME); 172 } 173 174 static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring) 175 { 176 if (ring->enqueue < ring->end) 177 ring->enqueue++; 178 else 179 ring->enqueue = ring->start; 180 181 return ring->enqueue; 182 } 183 184 static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring) 185 { 186 if (ring->dequeue < ring->end) 187 ring->dequeue++; 188 else 189 ring->dequeue = ring->start; 190 191 return ring->dequeue; 192 } 193 194 /* check if a ring is emtpy */ 195 static int gpd_ring_empty(struct mtu3_gpd_ring *ring) 196 { 197 struct qmu_gpd *enq = ring->enqueue; 198 struct qmu_gpd *next; 199 200 if (ring->enqueue < ring->end) 201 next = enq + 1; 202 else 203 next = ring->start; 204 205 /* one gpd is reserved to simplify gpd preparation */ 206 return next == ring->dequeue; 207 } 208 209 int mtu3_prepare_transfer(struct mtu3_ep *mep) 210 { 211 return gpd_ring_empty(&mep->gpd_ring); 212 } 213 214 static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq) 215 { 216 struct qmu_gpd *enq; 217 struct mtu3_gpd_ring *ring = &mep->gpd_ring; 218 struct qmu_gpd *gpd = ring->enqueue; 219 struct usb_request *req = &mreq->request; 220 dma_addr_t enq_dma; 221 u32 ext_addr; 222 223 gpd->dw0_info = 0; /* SW own it */ 224 gpd->buffer = cpu_to_le32(lower_32_bits(req->dma)); 225 ext_addr = GPD_EXT_BUF(upper_32_bits(req->dma)); 226 gpd->dw3_info = cpu_to_le32(GPD_DATA_LEN(req->length)); 227 228 /* get the next GPD */ 229 enq = advance_enq_gpd(ring); 230 enq_dma = gpd_virt_to_dma(ring, enq); 231 dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n", 232 mep->epnum, gpd, enq, &enq_dma); 233 234 enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO); 235 gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma)); 236 ext_addr |= GPD_EXT_NGP(upper_32_bits(enq_dma)); 237 gpd->dw0_info = cpu_to_le32(ext_addr); 238 239 if (req->zero) 240 gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP); 241 242 gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO); 243 244 mreq->gpd = gpd; 245 246 return 0; 247 } 248 249 static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq) 250 { 251 struct qmu_gpd *enq; 252 struct mtu3_gpd_ring *ring = &mep->gpd_ring; 253 struct qmu_gpd *gpd = ring->enqueue; 254 struct usb_request *req = &mreq->request; 255 dma_addr_t enq_dma; 256 u32 ext_addr; 257 258 gpd->dw0_info = 0; /* SW own it */ 259 gpd->buffer = cpu_to_le32(lower_32_bits(req->dma)); 260 ext_addr = GPD_EXT_BUF(upper_32_bits(req->dma)); 261 gpd->dw0_info = cpu_to_le32(GPD_RX_BUF_LEN(req->length)); 262 263 /* get the next GPD */ 264 enq = advance_enq_gpd(ring); 265 enq_dma = gpd_virt_to_dma(ring, enq); 266 dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n", 267 mep->epnum, gpd, enq, &enq_dma); 268 269 enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO); 270 gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma)); 271 ext_addr |= GPD_EXT_NGP(upper_32_bits(enq_dma)); 272 gpd->dw3_info = cpu_to_le32(ext_addr); 273 gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO); 274 275 mreq->gpd = gpd; 276 277 return 0; 278 } 279 280 void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq) 281 { 282 283 if (mep->is_in) 284 mtu3_prepare_tx_gpd(mep, mreq); 285 else 286 mtu3_prepare_rx_gpd(mep, mreq); 287 } 288 289 int mtu3_qmu_start(struct mtu3_ep *mep) 290 { 291 struct mtu3 *mtu = mep->mtu; 292 void __iomem *mbase = mtu->mac_base; 293 struct mtu3_gpd_ring *ring = &mep->gpd_ring; 294 u8 epnum = mep->epnum; 295 296 if (mep->is_in) { 297 /* set QMU start address */ 298 write_txq_start_addr(mbase, epnum, ring->dma); 299 mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_DMAREQEN); 300 /* send zero length packet according to ZLP flag in GPD */ 301 mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum)); 302 mtu3_writel(mbase, U3D_TQERRIESR0, 303 QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum)); 304 305 if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) { 306 dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum); 307 return 0; 308 } 309 mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START); 310 311 } else { 312 write_rxq_start_addr(mbase, epnum, ring->dma); 313 mtu3_setbits(mbase, MU3D_EP_RXCR0(epnum), RX_DMAREQEN); 314 /* don't expect ZLP */ 315 mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum)); 316 /* move to next GPD when receive ZLP */ 317 mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum)); 318 mtu3_writel(mbase, U3D_RQERRIESR0, 319 QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum)); 320 mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum)); 321 322 if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) { 323 dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum); 324 return 0; 325 } 326 mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START); 327 } 328 329 return 0; 330 } 331 332 /* may called in atomic context */ 333 void mtu3_qmu_stop(struct mtu3_ep *mep) 334 { 335 struct mtu3 *mtu = mep->mtu; 336 void __iomem *mbase = mtu->mac_base; 337 int epnum = mep->epnum; 338 u32 value = 0; 339 u32 qcsr; 340 int ret; 341 342 qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum); 343 344 if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) { 345 dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name); 346 return; 347 } 348 mtu3_writel(mbase, qcsr, QMU_Q_STOP); 349 350 ret = readl_poll_timeout_atomic(mbase + qcsr, value, 351 !(value & QMU_Q_ACTIVE), 1, 1000); 352 if (ret) { 353 dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name); 354 return; 355 } 356 357 dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name); 358 } 359 360 void mtu3_qmu_flush(struct mtu3_ep *mep) 361 { 362 363 dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__, 364 ((mep->is_in) ? "TX" : "RX")); 365 366 /*Stop QMU */ 367 mtu3_qmu_stop(mep); 368 reset_gpd_list(mep); 369 } 370 371 /* 372 * QMU can't transfer zero length packet directly (a hardware limit 373 * on old SoCs), so when needs to send ZLP, we intentionally trigger 374 * a length error interrupt, and in the ISR sends a ZLP by BMU. 375 */ 376 static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum) 377 { 378 struct mtu3_ep *mep = mtu->in_eps + epnum; 379 struct mtu3_gpd_ring *ring = &mep->gpd_ring; 380 void __iomem *mbase = mtu->mac_base; 381 struct qmu_gpd *gpd_current = NULL; 382 struct mtu3_request *mreq; 383 dma_addr_t cur_gpd_dma; 384 u32 txcsr = 0; 385 int ret; 386 387 mreq = next_request(mep); 388 if (mreq && mreq->request.length != 0) 389 return; 390 391 cur_gpd_dma = read_txq_cur_addr(mbase, epnum); 392 gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma); 393 394 if (GPD_DATA_LEN(le32_to_cpu(gpd_current->dw3_info)) != 0) { 395 dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum); 396 return; 397 } 398 399 dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq); 400 401 mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN); 402 403 ret = readl_poll_timeout_atomic(mbase + MU3D_EP_TXCR0(mep->epnum), 404 txcsr, !(txcsr & TX_FIFOFULL), 1, 1000); 405 if (ret) { 406 dev_err(mtu->dev, "%s wait for fifo empty fail\n", __func__); 407 return; 408 } 409 mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY); 410 411 /* by pass the current GDP */ 412 gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO); 413 414 /*enable DMAREQEN, switch back to QMU mode */ 415 mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN); 416 mtu3_qmu_resume(mep); 417 } 418 419 /* 420 * NOTE: request list maybe is already empty as following case: 421 * queue_tx --> qmu_interrupt(clear interrupt pending, schedule tasklet)--> 422 * queue_tx --> process_tasklet(meanwhile, the second one is transferred, 423 * tasklet process both of them)-->qmu_interrupt for second one. 424 * To avoid upper case, put qmu_done_tx in ISR directly to process it. 425 */ 426 static void qmu_done_tx(struct mtu3 *mtu, u8 epnum) 427 { 428 struct mtu3_ep *mep = mtu->in_eps + epnum; 429 struct mtu3_gpd_ring *ring = &mep->gpd_ring; 430 void __iomem *mbase = mtu->mac_base; 431 struct qmu_gpd *gpd = ring->dequeue; 432 struct qmu_gpd *gpd_current = NULL; 433 struct usb_request *request = NULL; 434 struct mtu3_request *mreq; 435 dma_addr_t cur_gpd_dma; 436 437 /*transfer phy address got from QMU register to virtual address */ 438 cur_gpd_dma = read_txq_cur_addr(mbase, epnum); 439 gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma); 440 441 dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n", 442 __func__, epnum, gpd, gpd_current, ring->enqueue); 443 444 while (gpd != gpd_current && !GET_GPD_HWO(gpd)) { 445 446 mreq = next_request(mep); 447 448 if (mreq == NULL || mreq->gpd != gpd) { 449 dev_err(mtu->dev, "no correct TX req is found\n"); 450 break; 451 } 452 453 request = &mreq->request; 454 request->actual = GPD_DATA_LEN(le32_to_cpu(gpd->dw3_info)); 455 mtu3_req_complete(mep, request, 0); 456 457 gpd = advance_deq_gpd(ring); 458 } 459 460 dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n", 461 __func__, epnum, ring->dequeue, ring->enqueue); 462 463 } 464 465 static void qmu_done_rx(struct mtu3 *mtu, u8 epnum) 466 { 467 struct mtu3_ep *mep = mtu->out_eps + epnum; 468 struct mtu3_gpd_ring *ring = &mep->gpd_ring; 469 void __iomem *mbase = mtu->mac_base; 470 struct qmu_gpd *gpd = ring->dequeue; 471 struct qmu_gpd *gpd_current = NULL; 472 struct usb_request *req = NULL; 473 struct mtu3_request *mreq; 474 dma_addr_t cur_gpd_dma; 475 476 cur_gpd_dma = read_rxq_cur_addr(mbase, epnum); 477 gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma); 478 479 dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n", 480 __func__, epnum, gpd, gpd_current, ring->enqueue); 481 482 while (gpd != gpd_current && !GET_GPD_HWO(gpd)) { 483 484 mreq = next_request(mep); 485 486 if (mreq == NULL || mreq->gpd != gpd) { 487 dev_err(mtu->dev, "no correct RX req is found\n"); 488 break; 489 } 490 req = &mreq->request; 491 492 req->actual = GPD_DATA_LEN(le32_to_cpu(gpd->dw3_info)); 493 mtu3_req_complete(mep, req, 0); 494 495 gpd = advance_deq_gpd(ring); 496 } 497 498 dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n", 499 __func__, epnum, ring->dequeue, ring->enqueue); 500 } 501 502 static void qmu_done_isr(struct mtu3 *mtu, u32 done_status) 503 { 504 int i; 505 506 for (i = 1; i < mtu->num_eps; i++) { 507 if (done_status & QMU_RX_DONE_INT(i)) 508 qmu_done_rx(mtu, i); 509 if (done_status & QMU_TX_DONE_INT(i)) 510 qmu_done_tx(mtu, i); 511 } 512 } 513 514 static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status) 515 { 516 void __iomem *mbase = mtu->mac_base; 517 u32 errval; 518 int i; 519 520 if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) { 521 errval = mtu3_readl(mbase, U3D_RQERRIR0); 522 for (i = 1; i < mtu->num_eps; i++) { 523 if (errval & QMU_RX_CS_ERR(i)) 524 dev_err(mtu->dev, "Rx %d CS error!\n", i); 525 526 if (errval & QMU_RX_LEN_ERR(i)) 527 dev_err(mtu->dev, "RX %d Length error\n", i); 528 } 529 mtu3_writel(mbase, U3D_RQERRIR0, errval); 530 } 531 532 if (qmu_status & RXQ_ZLPERR_INT) { 533 errval = mtu3_readl(mbase, U3D_RQERRIR1); 534 for (i = 1; i < mtu->num_eps; i++) { 535 if (errval & QMU_RX_ZLP_ERR(i)) 536 dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i); 537 } 538 mtu3_writel(mbase, U3D_RQERRIR1, errval); 539 } 540 541 if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) { 542 errval = mtu3_readl(mbase, U3D_TQERRIR0); 543 for (i = 1; i < mtu->num_eps; i++) { 544 if (errval & QMU_TX_CS_ERR(i)) 545 dev_err(mtu->dev, "Tx %d checksum error!\n", i); 546 547 if (errval & QMU_TX_LEN_ERR(i)) 548 qmu_tx_zlp_error_handler(mtu, i); 549 } 550 mtu3_writel(mbase, U3D_TQERRIR0, errval); 551 } 552 } 553 554 irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu) 555 { 556 void __iomem *mbase = mtu->mac_base; 557 u32 qmu_status; 558 u32 qmu_done_status; 559 560 /* U3D_QISAR1 is read update */ 561 qmu_status = mtu3_readl(mbase, U3D_QISAR1); 562 qmu_status &= mtu3_readl(mbase, U3D_QIER1); 563 564 qmu_done_status = mtu3_readl(mbase, U3D_QISAR0); 565 qmu_done_status &= mtu3_readl(mbase, U3D_QIER0); 566 mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */ 567 dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n", 568 (qmu_done_status & 0xFFFF), qmu_done_status >> 16, 569 qmu_status); 570 571 if (qmu_done_status) 572 qmu_done_isr(mtu, qmu_done_status); 573 574 if (qmu_status) 575 qmu_exception_isr(mtu, qmu_status); 576 577 return IRQ_HANDLED; 578 } 579 580 int mtu3_qmu_init(struct mtu3 *mtu) 581 { 582 583 compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B"); 584 585 mtu->qmu_gpd_pool = dma_pool_create("QMU_GPD", mtu->dev, 586 QMU_GPD_RING_SIZE, QMU_GPD_SIZE, 0); 587 588 if (!mtu->qmu_gpd_pool) 589 return -ENOMEM; 590 591 return 0; 592 } 593 594 void mtu3_qmu_exit(struct mtu3 *mtu) 595 { 596 dma_pool_destroy(mtu->qmu_gpd_pool); 597 } 598