// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2019 HiSilicon Limited. */
#include <linux/bitfield.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include "virt-dma.h"

#define HISI_DMA_SQ_BASE_L		0x0
#define HISI_DMA_SQ_BASE_H		0x4
#define HISI_DMA_SQ_DEPTH		0x8
#define HISI_DMA_SQ_TAIL_PTR		0xc
#define HISI_DMA_CQ_BASE_L		0x10
#define HISI_DMA_CQ_BASE_H		0x14
#define HISI_DMA_CQ_DEPTH		0x18
#define HISI_DMA_CQ_HEAD_PTR		0x1c
#define HISI_DMA_CTRL0			0x20
#define HISI_DMA_CTRL0_QUEUE_EN_S	0
#define HISI_DMA_CTRL0_QUEUE_PAUSE_S	4
#define HISI_DMA_CTRL1			0x24
#define HISI_DMA_CTRL1_QUEUE_RESET_S	0
#define HISI_DMA_Q_FSM_STS		0x30
#define HISI_DMA_FSM_STS_MASK		GENMASK(3, 0)
#define HISI_DMA_INT_STS		0x40
#define HISI_DMA_INT_STS_MASK		GENMASK(12, 0)
#define HISI_DMA_INT_MSK		0x44
#define HISI_DMA_MODE			0x217c
#define HISI_DMA_OFFSET			0x100

#define HISI_DMA_MSI_NUM		32
#define HISI_DMA_CHAN_NUM		30
#define HISI_DMA_Q_DEPTH_VAL		1024

#define PCI_BAR_2			2

enum hisi_dma_mode {
	EP = 0,
	RC,
};

enum hisi_dma_chan_status {
	DISABLE = -1,
	IDLE = 0,
	RUN,
	CPL,
	PAUSE,
	HALT,
	ABORT,
	WAIT,
	BUFFCLR,
};

struct hisi_dma_sqe {
	__le32 dw0;
#define OPCODE_MASK			GENMASK(3, 0)
#define OPCODE_SMALL_PACKAGE		0x1
#define OPCODE_M2M			0x4
#define LOCAL_IRQ_EN			BIT(8)
#define ATTR_SRC_MASK			GENMASK(14, 12)
	__le32 dw1;
	__le32 dw2;
#define ATTR_DST_MASK			GENMASK(26, 24)
	__le32 length;
	__le64 src_addr;
	__le64 dst_addr;
};

struct hisi_dma_cqe {
	__le32 rsv0;
	__le32 rsv1;
	__le16 sq_head;
	__le16 rsv2;
	__le16 rsv3;
	__le16 w0;
#define STATUS_MASK			GENMASK(15, 1)
#define STATUS_SUCC			0x0
#define VALID_BIT			BIT(0)
};

struct hisi_dma_desc {
	struct virt_dma_desc vd;
	struct hisi_dma_sqe sqe;
};

struct hisi_dma_chan {
	struct virt_dma_chan vc;
	struct hisi_dma_dev *hdma_dev;
	struct hisi_dma_sqe *sq;
	struct hisi_dma_cqe *cq;
	dma_addr_t sq_dma;
	dma_addr_t cq_dma;
	u32 sq_tail;
	u32 cq_head;
	u32 qp_num;
	enum hisi_dma_chan_status status;
	struct hisi_dma_desc *desc;
};

struct hisi_dma_dev {
	struct pci_dev *pdev;
	void __iomem *base;
	struct dma_device dma_dev;
	u32 chan_num;
	u32 chan_depth;
	struct hisi_dma_chan chan[];
};

static inline struct hisi_dma_chan *to_hisi_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct hisi_dma_chan, vc.chan);
}

static inline struct hisi_dma_desc *to_hisi_dma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct hisi_dma_desc, vd);
}

static inline void hisi_dma_chan_write(void __iomem *base, u32 reg, u32 index,
				       u32 val)
{
	writel_relaxed(val, base + reg + index * HISI_DMA_OFFSET);
}

static inline void hisi_dma_update_bit(void __iomem *addr, u32 pos, bool val)
{
	u32 tmp;

	tmp = readl_relaxed(addr);
	tmp = val ? tmp | BIT(pos) : tmp & ~BIT(pos);
	writel_relaxed(tmp, addr);
}
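/*
 * Layout note (derived from the #defines above, not from a datasheet):
 * every queue pair owns a 0x100-byte register window, so a per-queue
 * register lives at base + reg + qp * HISI_DMA_OFFSET. For example, the
 * SQ tail pointer of queue 3 sits at 0xc + 3 * 0x100 = 0x30c. The two
 * helpers above encode exactly this arithmetic; hisi_dma_update_bit() is
 * an unlocked read-modify-write, so callers must not race on the same
 * register.
 */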
static void hisi_dma_pause_dma(struct hisi_dma_dev *hdma_dev, u32 index,
			       bool pause)
{
	void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index *
			     HISI_DMA_OFFSET;

	hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_PAUSE_S, pause);
}

static void hisi_dma_enable_dma(struct hisi_dma_dev *hdma_dev, u32 index,
				bool enable)
{
	void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index *
			     HISI_DMA_OFFSET;

	hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_EN_S, enable);
}

static void hisi_dma_mask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
{
	hisi_dma_chan_write(hdma_dev->base, HISI_DMA_INT_MSK, qp_index,
			    HISI_DMA_INT_STS_MASK);
}

static void hisi_dma_unmask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
{
	void __iomem *base = hdma_dev->base;

	hisi_dma_chan_write(base, HISI_DMA_INT_STS, qp_index,
			    HISI_DMA_INT_STS_MASK);
	hisi_dma_chan_write(base, HISI_DMA_INT_MSK, qp_index, 0);
}

static void hisi_dma_do_reset(struct hisi_dma_dev *hdma_dev, u32 index)
{
	void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL1 + index *
			     HISI_DMA_OFFSET;

	hisi_dma_update_bit(addr, HISI_DMA_CTRL1_QUEUE_RESET_S, 1);
}

static void hisi_dma_reset_qp_point(struct hisi_dma_dev *hdma_dev, u32 index)
{
	hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, index, 0);
	hisi_dma_chan_write(hdma_dev->base, HISI_DMA_CQ_HEAD_PTR, index, 0);
}

static void hisi_dma_reset_or_disable_hw_chan(struct hisi_dma_chan *chan,
					      bool disable)
{
	struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
	u32 index = chan->qp_num, tmp;
	int ret;

	hisi_dma_pause_dma(hdma_dev, index, true);
	hisi_dma_enable_dma(hdma_dev, index, false);
	hisi_dma_mask_irq(hdma_dev, index);

	ret = readl_relaxed_poll_timeout(hdma_dev->base +
		HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp,
		FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) != RUN, 10, 1000);
	if (ret) {
		dev_err(&hdma_dev->pdev->dev, "disable channel timeout!\n");
		WARN_ON(1);
	}

	hisi_dma_do_reset(hdma_dev, index);
	hisi_dma_reset_qp_point(hdma_dev, index);
	hisi_dma_pause_dma(hdma_dev, index, false);

	if (!disable) {
		hisi_dma_enable_dma(hdma_dev, index, true);
		hisi_dma_unmask_irq(hdma_dev, index);
	}

	ret = readl_relaxed_poll_timeout(hdma_dev->base +
		HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp,
		FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) == IDLE, 10, 1000);
	if (ret) {
		dev_err(&hdma_dev->pdev->dev, "reset channel timeout!\n");
		WARN_ON(1);
	}
}

static void hisi_dma_free_chan_resources(struct dma_chan *c)
{
	struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
	struct hisi_dma_dev *hdma_dev = chan->hdma_dev;

	hisi_dma_reset_or_disable_hw_chan(chan, false);
	vchan_free_chan_resources(&chan->vc);

	memset(chan->sq, 0, sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth);
	memset(chan->cq, 0, sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth);
	chan->sq_tail = 0;
	chan->cq_head = 0;
	chan->status = DISABLE;
}

static void hisi_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(to_hisi_dma_desc(vd));
}
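/*
 * A minimal client-side sketch (illustrative only, not part of this
 * driver): a consumer reaches this engine through the generic dmaengine
 * API, roughly:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *ch;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	ch = dma_request_chan_by_mask(&mask);
 *	tx = dmaengine_prep_dma_memcpy(ch, dst, src, len, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(ch);
 *
 * hisi_dma_prep_dma_memcpy() below backs the prep step: it only records
 * length, source and destination in a private SQE; the opcode and the
 * interrupt-enable bit are filled in later, in hisi_dma_start_transfer().
 */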
static struct dma_async_tx_descriptor *
hisi_dma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dst, dma_addr_t src,
			 size_t len, unsigned long flags)
{
	struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
	struct hisi_dma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->sqe.length = cpu_to_le32(len);
	desc->sqe.src_addr = cpu_to_le64(src);
	desc->sqe.dst_addr = cpu_to_le64(dst);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);
}

static enum dma_status
hisi_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	return dma_cookie_status(c, cookie, txstate);
}

static void hisi_dma_start_transfer(struct hisi_dma_chan *chan)
{
	struct hisi_dma_sqe *sqe = chan->sq + chan->sq_tail;
	struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
	struct hisi_dma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd) {
		dev_err(&hdma_dev->pdev->dev, "no issued task!\n");
		chan->desc = NULL;
		return;
	}
	list_del(&vd->node);
	desc = to_hisi_dma_desc(vd);
	chan->desc = desc;

	memcpy(sqe, &desc->sqe, sizeof(struct hisi_dma_sqe));

	/* update other fields in sqe */
	sqe->dw0 = cpu_to_le32(FIELD_PREP(OPCODE_MASK, OPCODE_M2M));
	sqe->dw0 |= cpu_to_le32(LOCAL_IRQ_EN);

	/* make sure data has been updated in sqe */
	wmb();

	/* update sq tail, point to new sqe position */
	chan->sq_tail = (chan->sq_tail + 1) % hdma_dev->chan_depth;

	/* update sq_tail to trigger a new task */
	hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, chan->qp_num,
			    chan->sq_tail);
}

static void hisi_dma_issue_pending(struct dma_chan *c)
{
	struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	if (vchan_issue_pending(&chan->vc))
		hisi_dma_start_transfer(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static int hisi_dma_terminate_all(struct dma_chan *c)
{
	struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vc.lock, flags);

	hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, true);
	if (chan->desc) {
		vchan_terminate_vdesc(&chan->desc->vd);
		chan->desc = NULL;
	}

	vchan_get_all_descriptors(&chan->vc, &head);

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	vchan_dma_desc_free_list(&chan->vc, &head);
	hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, false);

	return 0;
}

static void hisi_dma_synchronize(struct dma_chan *c)
{
	struct hisi_dma_chan *chan = to_hisi_dma_chan(c);

	vchan_synchronize(&chan->vc);
}

static int hisi_dma_alloc_qps_mem(struct hisi_dma_dev *hdma_dev)
{
	size_t sq_size = sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth;
	size_t cq_size = sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth;
	struct device *dev = &hdma_dev->pdev->dev;
	struct hisi_dma_chan *chan;
	int i;

	for (i = 0; i < hdma_dev->chan_num; i++) {
		chan = &hdma_dev->chan[i];
		chan->sq = dmam_alloc_coherent(dev, sq_size, &chan->sq_dma,
					       GFP_KERNEL);
		if (!chan->sq)
			return -ENOMEM;

		chan->cq = dmam_alloc_coherent(dev, cq_size, &chan->cq_dma,
					       GFP_KERNEL);
		if (!chan->cq)
			return -ENOMEM;
	}

	return 0;
}
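/*
 * Ring sizing, spelled out (derived from the struct layouts above, with
 * HISI_DMA_Q_DEPTH_VAL = 1024): each SQE is 32 bytes and each CQE is 16
 * bytes, so every channel pins 32 KiB of SQ plus 16 KiB of CQ in
 * DMA-coherent memory, roughly 1.4 MiB across all 30 channels. The CPU
 * produces into the SQ and the device produces into the CQ; only the
 * tail/head pointers are exchanged through MMIO.
 */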
static void hisi_dma_init_hw_qp(struct hisi_dma_dev *hdma_dev, u32 index)
{
	struct hisi_dma_chan *chan = &hdma_dev->chan[index];
	u32 hw_depth = hdma_dev->chan_depth - 1;
	void __iomem *base = hdma_dev->base;

	/* set sq, cq base */
	hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_L, index,
			    lower_32_bits(chan->sq_dma));
	hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_H, index,
			    upper_32_bits(chan->sq_dma));
	hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_L, index,
			    lower_32_bits(chan->cq_dma));
	hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_H, index,
			    upper_32_bits(chan->cq_dma));

	/* set sq, cq depth */
	hisi_dma_chan_write(base, HISI_DMA_SQ_DEPTH, index, hw_depth);
	hisi_dma_chan_write(base, HISI_DMA_CQ_DEPTH, index, hw_depth);

	/* init sq tail and cq head */
	hisi_dma_chan_write(base, HISI_DMA_SQ_TAIL_PTR, index, 0);
	hisi_dma_chan_write(base, HISI_DMA_CQ_HEAD_PTR, index, 0);
}

static void hisi_dma_enable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
{
	hisi_dma_init_hw_qp(hdma_dev, qp_index);
	hisi_dma_unmask_irq(hdma_dev, qp_index);
	hisi_dma_enable_dma(hdma_dev, qp_index, true);
}

static void hisi_dma_disable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
{
	hisi_dma_reset_or_disable_hw_chan(&hdma_dev->chan[qp_index], true);
}

static void hisi_dma_enable_qps(struct hisi_dma_dev *hdma_dev)
{
	int i;

	for (i = 0; i < hdma_dev->chan_num; i++) {
		hdma_dev->chan[i].qp_num = i;
		hdma_dev->chan[i].hdma_dev = hdma_dev;
		hdma_dev->chan[i].vc.desc_free = hisi_dma_desc_free;
		vchan_init(&hdma_dev->chan[i].vc, &hdma_dev->dma_dev);
		hisi_dma_enable_qp(hdma_dev, i);
	}
}

static void hisi_dma_disable_qps(struct hisi_dma_dev *hdma_dev)
{
	int i;

	for (i = 0; i < hdma_dev->chan_num; i++) {
		hisi_dma_disable_qp(hdma_dev, i);
		tasklet_kill(&hdma_dev->chan[i].vc.task);
	}
}

static irqreturn_t hisi_dma_irq(int irq, void *data)
{
	struct hisi_dma_chan *chan = data;
	struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
	struct hisi_dma_desc *desc;
	struct hisi_dma_cqe *cqe;

	spin_lock(&chan->vc.lock);

	desc = chan->desc;
	cqe = chan->cq + chan->cq_head;
	if (desc) {
		chan->cq_head = (chan->cq_head + 1) % hdma_dev->chan_depth;
		hisi_dma_chan_write(hdma_dev->base, HISI_DMA_CQ_HEAD_PTR,
				    chan->qp_num, chan->cq_head);
		if (FIELD_GET(STATUS_MASK, le16_to_cpu(cqe->w0)) == STATUS_SUCC)
			vchan_cookie_complete(&desc->vd);
		else
			dev_err(&hdma_dev->pdev->dev, "task error!\n");

		chan->desc = NULL;
	}

	spin_unlock(&chan->vc.lock);

	return IRQ_HANDLED;
}
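/*
 * Interrupt wiring: probe asks for exactly HISI_DMA_MSI_NUM (32) MSI
 * vectors and the loop below attaches hisi_dma_irq() to the first
 * HISI_DMA_CHAN_NUM (30) of them, one vector per channel. The handler
 * therefore never has to demultiplex: its 'data' cookie is already the
 * owning channel.
 */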
static int hisi_dma_request_qps_irq(struct hisi_dma_dev *hdma_dev)
{
	struct pci_dev *pdev = hdma_dev->pdev;
	int i, ret;

	for (i = 0; i < hdma_dev->chan_num; i++) {
		ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
				       hisi_dma_irq, IRQF_SHARED, "hisi_dma",
				       &hdma_dev->chan[i]);
		if (ret)
			return ret;
	}

	return 0;
}

/* This function enables all hw channels in a device */
static int hisi_dma_enable_hw_channels(struct hisi_dma_dev *hdma_dev)
{
	int ret;

	ret = hisi_dma_alloc_qps_mem(hdma_dev);
	if (ret) {
		dev_err(&hdma_dev->pdev->dev, "fail to allocate qp memory!\n");
		return ret;
	}

	ret = hisi_dma_request_qps_irq(hdma_dev);
	if (ret) {
		dev_err(&hdma_dev->pdev->dev, "fail to request qp irq!\n");
		return ret;
	}

	hisi_dma_enable_qps(hdma_dev);

	return 0;
}

static void hisi_dma_disable_hw_channels(void *data)
{
	hisi_dma_disable_qps(data);
}

static void hisi_dma_set_mode(struct hisi_dma_dev *hdma_dev,
			      enum hisi_dma_mode mode)
{
	writel_relaxed(mode == RC ? 1 : 0, hdma_dev->base + HISI_DMA_MODE);
}

static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct hisi_dma_dev *hdma_dev;
	struct dma_device *dma_dev;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "failed to enable device mem!\n");
		return ret;
	}

	ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_2, pci_name(pdev));
	if (ret) {
		dev_err(dev, "failed to remap I/O region!\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	hdma_dev = devm_kzalloc(dev, struct_size(hdma_dev, chan, HISI_DMA_CHAN_NUM),
				GFP_KERNEL);
	if (!hdma_dev)
		return -ENOMEM;

	hdma_dev->base = pcim_iomap_table(pdev)[PCI_BAR_2];
	hdma_dev->pdev = pdev;
	hdma_dev->chan_num = HISI_DMA_CHAN_NUM;
	hdma_dev->chan_depth = HISI_DMA_Q_DEPTH_VAL;

	pci_set_drvdata(pdev, hdma_dev);
	pci_set_master(pdev);

	/* This will be freed by 'pcim_release()'. See 'pcim_enable_device()' */
	ret = pci_alloc_irq_vectors(pdev, HISI_DMA_MSI_NUM, HISI_DMA_MSI_NUM,
				    PCI_IRQ_MSI);
	if (ret < 0) {
		dev_err(dev, "Failed to allocate MSI vectors!\n");
		return ret;
	}

	dma_dev = &hdma_dev->dma_dev;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = hisi_dma_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = hisi_dma_prep_dma_memcpy;
	dma_dev->device_tx_status = hisi_dma_tx_status;
	dma_dev->device_issue_pending = hisi_dma_issue_pending;
	dma_dev->device_terminate_all = hisi_dma_terminate_all;
	dma_dev->device_synchronize = hisi_dma_synchronize;
	dma_dev->directions = BIT(DMA_MEM_TO_MEM);
	dma_dev->dev = dev;
	INIT_LIST_HEAD(&dma_dev->channels);

	hisi_dma_set_mode(hdma_dev, RC);

	ret = hisi_dma_enable_hw_channels(hdma_dev);
	if (ret < 0) {
		dev_err(dev, "failed to enable hw channel!\n");
		return ret;
	}

	ret = devm_add_action_or_reset(dev, hisi_dma_disable_hw_channels,
				       hdma_dev);
	if (ret)
		return ret;

	ret = dmaenginem_async_device_register(dma_dev);
	if (ret < 0)
		dev_err(dev, "failed to register device!\n");

	return ret;
}

static const struct pci_device_id hisi_dma_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa122) },
	{ 0, }
};

static struct pci_driver hisi_dma_pci_driver = {
	.name		= "hisi_dma",
	.id_table	= hisi_dma_pci_tbl,
	.probe		= hisi_dma_probe,
};

module_pci_driver(hisi_dma_pci_driver);

MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_AUTHOR("Zhenfa Qiu <qiuzhenfa@hisilicon.com>");
MODULE_DESCRIPTION("HiSilicon Kunpeng DMA controller driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, hisi_dma_pci_tbl);