// SPDX-License-Identifier: GPL-2.0
/*
 * mtu3_qmu.c - Queue Management Unit driver for device controller
 *
 * Copyright (C) 2016 MediaTek Inc.
 *
 * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
 */

/*
 * The Queue Management Unit (QMU) is designed to offload SW effort
 * when servicing DMA interrupts.
 * By preparing General Purpose Descriptors (GPD) and Buffer Descriptors (BD),
 * SW links data buffers and triggers the QMU to transfer data to the host /
 * from the device in one go.
 * Currently only GPD is supported.
 *
 * For more detailed information, please refer to the QMU Programming Guide.
 */

#include <linux/dmapool.h>
#include <linux/iopoll.h>

#include "mtu3.h"

#define QMU_CHECKSUM_LEN	16

#define GPD_FLAGS_HWO	BIT(0)
#define GPD_FLAGS_BDP	BIT(1)
#define GPD_FLAGS_BPS	BIT(2)
#define GPD_FLAGS_IOC	BIT(7)

#define GPD_EXT_FLAG_ZLP	BIT(5)
/* bits 32..35 of the next-GPD / data-buffer DMA address (see *_ext_addr) */
#define GPD_EXT_NGP(x)		(((x) & 0xf) << 4)
#define GPD_EXT_BUF(x)		(((x) & 0xf) << 0)

#define HILO_GEN64(hi, lo) (((u64)(hi) << 32) + (lo))
#define HILO_DMA(hi, lo)	\
	((dma_addr_t)HILO_GEN64((le32_to_cpu(hi)), (le32_to_cpu(lo))))

static dma_addr_t read_txq_cur_addr(void __iomem *mbase, u8 epnum)
{
	u32 txcpr;
	u32 txhiar;

	txcpr = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
	txhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));

	return HILO_DMA(QMU_CUR_GPD_ADDR_HI(txhiar), txcpr);
}

static dma_addr_t read_rxq_cur_addr(void __iomem *mbase, u8 epnum)
{
	u32 rxcpr;
	u32 rxhiar;

	rxcpr = mtu3_readl(mbase, USB_QMU_RQCPR(epnum));
	rxhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));

	return HILO_DMA(QMU_CUR_GPD_ADDR_HI(rxhiar), rxcpr);
}

static void write_txq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
	u32 tqhiar;

	mtu3_writel(mbase, USB_QMU_TQSAR(epnum),
		    cpu_to_le32(lower_32_bits(dma)));
	tqhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));
	tqhiar &= ~QMU_START_ADDR_HI_MSK;
	tqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
	mtu3_writel(mbase, USB_QMU_TQHIAR(epnum), tqhiar);
}

static void write_rxq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
	u32 rqhiar;

	mtu3_writel(mbase, USB_QMU_RQSAR(epnum),
		    cpu_to_le32(lower_32_bits(dma)));
	rqhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));
	rqhiar &= ~QMU_START_ADDR_HI_MSK;
	rqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
	mtu3_writel(mbase, USB_QMU_RQHIAR(epnum), rqhiar);
}
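
/*
 * Note on the ring layout (informational, derived from the code below and
 * the driver headers): each endpoint owns one GPD ring allocated as a
 * single contiguous block from the "QMU_GPD" dma_pool, big enough for
 * MAX_GPD_NUM descriptors. Because the block is contiguous, converting
 * between a GPD's virtual address and its DMA address is plain offset
 * arithmetic against ring->start / ring->dma, as the two helpers below do.
 * The ring is used circularly, and one GPD is always kept reserved so that
 * enqueue == dequeue unambiguously means "no GPD pending".
 */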

static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
		dma_addr_t dma_addr)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head);

	if (offset >= MAX_GPD_NUM)
		return NULL;

	return gpd_head + offset;
}

static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
		struct qmu_gpd *gpd)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset;

	offset = gpd - gpd_head;
	if (offset >= MAX_GPD_NUM)
		return 0;

	return dma_base + (offset * sizeof(*gpd));
}

static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
{
	ring->start = gpd;
	ring->enqueue = gpd;
	ring->dequeue = gpd;
	ring->end = gpd + MAX_GPD_NUM - 1;
}

static void reset_gpd_list(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->start;

	if (gpd) {
		gpd->flag &= ~GPD_FLAGS_HWO;
		gpd_ring_init(ring, gpd);
	}
}

int mtu3_gpd_ring_alloc(struct mtu3_ep *mep)
{
	struct qmu_gpd *gpd;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	/* software owns all gpds by default */
	gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma);
	if (gpd == NULL)
		return -ENOMEM;

	gpd_ring_init(ring, gpd);

	return 0;
}

void mtu3_gpd_ring_free(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	dma_pool_free(mep->mtu->qmu_gpd_pool,
			ring->start, ring->dma);
	memset(ring, 0, sizeof(*ring));
}

void mtu3_qmu_resume(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 offset;

	offset = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

	mtu3_writel(mbase, offset, QMU_Q_RESUME);
	if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE))
		mtu3_writel(mbase, offset, QMU_Q_RESUME);
}

static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->enqueue < ring->end)
		ring->enqueue++;
	else
		ring->enqueue = ring->start;

	return ring->enqueue;
}

static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->dequeue < ring->end)
		ring->dequeue++;
	else
		ring->dequeue = ring->start;

	return ring->dequeue;
}

/*
 * Check whether the ring has run out of free GPDs for a new request:
 * one gpd is reserved to simplify gpd preparation.
 */
static int gpd_ring_empty(struct mtu3_gpd_ring *ring)
{
	struct qmu_gpd *enq = ring->enqueue;
	struct qmu_gpd *next;

	if (ring->enqueue < ring->end)
		next = enq + 1;
	else
		next = ring->start;

	/* one gpd is reserved to simplify gpd preparation */
	return next == ring->dequeue;
}

int mtu3_prepare_transfer(struct mtu3_ep *mep)
{
	return gpd_ring_empty(&mep->gpd_ring);
}
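
/*
 * Informational sketch of the expected submission flow. The actual call
 * sites live in the gadget layer (outside this file), so treat this as an
 * illustration rather than a contract:
 *
 *	if (mtu3_prepare_transfer(mep))	// ring has no free GPD
 *		return -EAGAIN;
 *	mtu3_insert_gpd(mep, mreq);	// fill the current GPD, set HWO
 *	mtu3_qmu_resume(mep);		// let QMU fetch the new GPD
 *
 * mtu3_qmu_start() is expected to be used once, when an endpoint is
 * enabled, to program the ring's start address and start the queue.
 */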

static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;
	dma_addr_t enq_dma;
	u16 ext_addr;

	/* set all fields to zero as default value */
	memset(gpd, 0, sizeof(*gpd));

	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
	ext_addr = GPD_EXT_BUF(upper_32_bits(req->dma));
	gpd->buf_len = cpu_to_le16(req->length);
	gpd->flag |= GPD_FLAGS_IOC;

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	enq_dma = gpd_virt_to_dma(ring, enq);
	dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
		mep->epnum, gpd, enq, &enq_dma);

	enq->flag &= ~GPD_FLAGS_HWO;
	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
	ext_addr |= GPD_EXT_NGP(upper_32_bits(enq_dma));
	gpd->tx_ext_addr = cpu_to_le16(ext_addr);

	if (req->zero)
		gpd->ext_flag |= GPD_EXT_FLAG_ZLP;

	gpd->flag |= GPD_FLAGS_HWO;

	mreq->gpd = gpd;

	return 0;
}

static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;
	dma_addr_t enq_dma;
	u16 ext_addr;

	/* set all fields to zero as default value */
	memset(gpd, 0, sizeof(*gpd));

	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
	ext_addr = GPD_EXT_BUF(upper_32_bits(req->dma));
	gpd->data_buf_len = cpu_to_le16(req->length);
	gpd->flag |= GPD_FLAGS_IOC;

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	enq_dma = gpd_virt_to_dma(ring, enq);
	dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
		mep->epnum, gpd, enq, &enq_dma);

	enq->flag &= ~GPD_FLAGS_HWO;
	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
	ext_addr |= GPD_EXT_NGP(upper_32_bits(enq_dma));
	gpd->rx_ext_addr = cpu_to_le16(ext_addr);
	gpd->flag |= GPD_FLAGS_HWO;

	mreq->gpd = gpd;

	return 0;
}

void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	if (mep->is_in)
		mtu3_prepare_tx_gpd(mep, mreq);
	else
		mtu3_prepare_rx_gpd(mep, mreq);
}
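
/*
 * Informational summary of the GPD handover done by the two helpers above:
 * the GPD being queued is linked to the next (still empty) GPD via
 * next_gpd, the next GPD keeps its HWO bit cleared so that QMU stops
 * there, and only then is HWO set on the current GPD to hand it over to
 * the hardware. GPD_FLAGS_IOC asks for a "done" interrupt once the GPD
 * has been consumed.
 */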

int mtu3_qmu_start(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	u8 epnum = mep->epnum;

	if (mep->is_in) {
		/* set QMU start address */
		write_txq_start_addr(mbase, epnum, ring->dma);
		mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_DMAREQEN);
		/* send zero length packet according to ZLP flag in GPD */
		mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum));
		mtu3_writel(mbase, U3D_TQERRIESR0,
				QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START);

	} else {
		write_rxq_start_addr(mbase, epnum, ring->dma);
		mtu3_setbits(mbase, MU3D_EP_RXCR0(epnum), RX_DMAREQEN);
		/* don't expect ZLP */
		mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum));
		/* move to next GPD when receiving a ZLP */
		mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR0,
				QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START);
	}

	return 0;
}

/* may be called in atomic context */
void mtu3_qmu_stop(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 value = 0;
	u32 qcsr;
	int ret;

	qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

	if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) {
		dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name);
		return;
	}
	mtu3_writel(mbase, qcsr, QMU_Q_STOP);

	ret = readl_poll_timeout_atomic(mbase + qcsr, value,
			!(value & QMU_Q_ACTIVE), 1, 1000);
	if (ret) {
		dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name);
		return;
	}

	dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name);
}

void mtu3_qmu_flush(struct mtu3_ep *mep)
{
	dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__,
		((mep->is_in) ? "TX" : "RX"));

	/* stop QMU */
	mtu3_qmu_stop(mep);
	reset_gpd_list(mep);
}

/*
 * QMU can't transfer a zero length packet directly (a hardware limitation
 * on old SoCs), so when a ZLP needs to be sent, we intentionally trigger
 * a length error interrupt and send the ZLP by the BMU in the ISR.
 */
static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *req = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;
	u32 txcsr = 0;
	int ret;

	mreq = next_request(mep);
	if (mreq && mreq->request.length == 0)
		req = &mreq->request;
	else
		return;

	cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	if (le16_to_cpu(gpd_current->buf_len) != 0) {
		dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
		return;
	}

	dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, req);

	mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);

	ret = readl_poll_timeout_atomic(mbase + MU3D_EP_TXCR0(mep->epnum),
			txcsr, !(txcsr & TX_FIFOFULL), 1, 1000);
	if (ret) {
		dev_err(mtu->dev, "%s wait for fifo empty fail\n", __func__);
		return;
	}
	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);

	/* bypass the current GPD */
	gpd_current->flag |= GPD_FLAGS_BPS;
	gpd_current->flag |= GPD_FLAGS_HWO;

	/* enable DMAREQEN, switch back to QMU mode */
	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
	mtu3_qmu_resume(mep);
}

/*
 * NOTE: the request list may already be empty, as in the following case:
 * queue_tx --> qmu_interrupt(clear interrupt pending, schedule tasklet) -->
 * queue_tx --> process_tasklet(meanwhile, the second one is transferred,
 * tasklet processes both of them) --> qmu_interrupt for the second one.
 * To avoid this case, call qmu_done_tx() directly from the ISR.
 */
static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *request = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	/* translate the current GPD's dma address into a virtual address */
	cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

	while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {

		mreq = next_request(mep);

		if (mreq == NULL || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct TX req is found\n");
			break;
		}

		request = &mreq->request;
		request->actual = le16_to_cpu(gpd->buf_len);
		mtu3_req_complete(mep, request, 0);

		gpd = advance_deq_gpd(ring);
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}

static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->out_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *req = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	cur_gpd_dma = read_rxq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

	while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {

		mreq = next_request(mep);

		if (mreq == NULL || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct RX req is found\n");
			break;
		}
		req = &mreq->request;

		req->actual = le16_to_cpu(gpd->buf_len);
		mtu3_req_complete(mep, req, 0);

		gpd = advance_deq_gpd(ring);
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}
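
/*
 * Informational: the done status read from U3D_QISAR0 carries per-EP bits,
 * with TX done interrupts in the low 16 bits and RX done interrupts in the
 * high 16 bits (see the debug print in mtu3_qmu_isr() below);
 * QMU_TX_DONE_INT()/QMU_RX_DONE_INT() select the bit of a given endpoint.
 */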

static void qmu_done_isr(struct mtu3 *mtu, u32 done_status)
{
	int i;

	for (i = 1; i < mtu->num_eps; i++) {
		if (done_status & QMU_RX_DONE_INT(i))
			qmu_done_rx(mtu, i);
		if (done_status & QMU_TX_DONE_INT(i))
			qmu_done_tx(mtu, i);
	}
}

static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status)
{
	void __iomem *mbase = mtu->mac_base;
	u32 errval;
	int i;

	if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_RQERRIR0);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_CS_ERR(i))
				dev_err(mtu->dev, "Rx %d CS error!\n", i);

			if (errval & QMU_RX_LEN_ERR(i))
				dev_err(mtu->dev, "RX %d Length error\n", i);
		}
		mtu3_writel(mbase, U3D_RQERRIR0, errval);
	}

	if (qmu_status & RXQ_ZLPERR_INT) {
		errval = mtu3_readl(mbase, U3D_RQERRIR1);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_ZLP_ERR(i))
				dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i);
		}
		mtu3_writel(mbase, U3D_RQERRIR1, errval);
	}

	if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_TQERRIR0);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_TX_CS_ERR(i))
				dev_err(mtu->dev, "Tx %d checksum error!\n", i);

			if (errval & QMU_TX_LEN_ERR(i))
				qmu_tx_zlp_error_handler(mtu, i);
		}
		mtu3_writel(mbase, U3D_TQERRIR0, errval);
	}
}

irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu)
{
	void __iomem *mbase = mtu->mac_base;
	u32 qmu_status;
	u32 qmu_done_status;

	/* U3D_QISAR1 is read update */
	qmu_status = mtu3_readl(mbase, U3D_QISAR1);
	qmu_status &= mtu3_readl(mbase, U3D_QIER1);

	qmu_done_status = mtu3_readl(mbase, U3D_QISAR0);
	qmu_done_status &= mtu3_readl(mbase, U3D_QIER0);
	mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */
	dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n",
		(qmu_done_status & 0xFFFF), qmu_done_status >> 16,
		qmu_status);

	if (qmu_done_status)
		qmu_done_isr(mtu, qmu_done_status);

	if (qmu_status)
		qmu_exception_isr(mtu, qmu_status);

	return IRQ_HANDLED;
}

int mtu3_qmu_init(struct mtu3 *mtu)
{
	compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B");

	mtu->qmu_gpd_pool = dma_pool_create("QMU_GPD", mtu->dev,
			QMU_GPD_RING_SIZE, QMU_GPD_SIZE, 0);

	if (!mtu->qmu_gpd_pool)
		return -ENOMEM;

	return 0;
}

void mtu3_qmu_exit(struct mtu3 *mtu)
{
	dma_pool_destroy(mtu->qmu_gpd_pool);
}
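
/*
 * Informational note: the call sites are outside this file, so this is an
 * assumption about how the QMU layer is wired up rather than a guarantee.
 * mtu3_qmu_init()/mtu3_qmu_exit() are expected to pair with controller
 * probe/remove, mtu3_gpd_ring_alloc()/mtu3_gpd_ring_free() with endpoint
 * setup/teardown, and mtu3_qmu_isr() to be called from the core interrupt
 * handler when a QMU interrupt is pending.
 */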