1 // SPDX-License-Identifier: GPL-2.0-only 2 3 /* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */ 4 /* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */ 5 6 #include <linux/bitfield.h> 7 #include <linux/bits.h> 8 #include <linux/completion.h> 9 #include <linux/delay.h> 10 #include <linux/dma-buf.h> 11 #include <linux/dma-mapping.h> 12 #include <linux/interrupt.h> 13 #include <linux/kref.h> 14 #include <linux/list.h> 15 #include <linux/math64.h> 16 #include <linux/mm.h> 17 #include <linux/moduleparam.h> 18 #include <linux/scatterlist.h> 19 #include <linux/spinlock.h> 20 #include <linux/srcu.h> 21 #include <linux/types.h> 22 #include <linux/uaccess.h> 23 #include <linux/wait.h> 24 #include <drm/drm_file.h> 25 #include <drm/drm_gem.h> 26 #include <drm/drm_print.h> 27 #include <uapi/drm/qaic_accel.h> 28 29 #include "qaic.h" 30 31 #define SEM_VAL_MASK GENMASK_ULL(11, 0) 32 #define SEM_INDEX_MASK GENMASK_ULL(4, 0) 33 #define BULK_XFER BIT(3) 34 #define GEN_COMPLETION BIT(4) 35 #define INBOUND_XFER 1 36 #define OUTBOUND_XFER 2 37 #define REQHP_OFF 0x0 /* we read this */ 38 #define REQTP_OFF 0x4 /* we write this */ 39 #define RSPHP_OFF 0x8 /* we write this */ 40 #define RSPTP_OFF 0xc /* we read this */ 41 42 #define ENCODE_SEM(val, index, sync, cmd, flags) \ 43 ({ \ 44 FIELD_PREP(GENMASK(11, 0), (val)) | \ 45 FIELD_PREP(GENMASK(20, 16), (index)) | \ 46 FIELD_PREP(BIT(22), (sync)) | \ 47 FIELD_PREP(GENMASK(26, 24), (cmd)) | \ 48 FIELD_PREP(GENMASK(30, 29), (flags)) | \ 49 FIELD_PREP(BIT(31), (cmd) ? 1 : 0); \ 50 }) 51 #define NUM_EVENTS 128 52 #define NUM_DELAYS 10 53 54 static unsigned int wait_exec_default_timeout_ms = 5000; /* 5 sec default */ 55 module_param(wait_exec_default_timeout_ms, uint, 0600); 56 MODULE_PARM_DESC(wait_exec_default_timeout_ms, "Default timeout for DRM_IOCTL_QAIC_WAIT_BO"); 57 58 static unsigned int datapath_poll_interval_us = 100; /* 100 usec default */ 59 module_param(datapath_poll_interval_us, uint, 0600); 60 MODULE_PARM_DESC(datapath_poll_interval_us, 61 "Amount of time to sleep between activity when datapath polling is enabled"); 62 63 struct dbc_req { 64 /* 65 * A request ID is assigned to each memory handle going in DMA queue. 66 * As a single memory handle can enqueue multiple elements in DMA queue 67 * all of them will have the same request ID. 68 */ 69 __le16 req_id; 70 /* Future use */ 71 __u8 seq_id; 72 /* 73 * Special encoded variable 74 * 7 0 - Do not force to generate MSI after DMA is completed 75 * 1 - Force to generate MSI after DMA is completed 76 * 6:5 Reserved 77 * 4 1 - Generate completion element in the response queue 78 * 0 - No Completion Code 79 * 3 0 - DMA request is a Link list transfer 80 * 1 - DMA request is a Bulk transfer 81 * 2 Reserved 82 * 1:0 00 - No DMA transfer involved 83 * 01 - DMA transfer is part of inbound transfer 84 * 10 - DMA transfer has outbound transfer 85 * 11 - NA 86 */ 87 __u8 cmd; 88 __le32 resv; 89 /* Source address for the transfer */ 90 __le64 src_addr; 91 /* Destination address for the transfer */ 92 __le64 dest_addr; 93 /* Length of transfer request */ 94 __le32 len; 95 __le32 resv2; 96 /* Doorbell address */ 97 __le64 db_addr; 98 /* 99 * Special encoded variable 100 * 7 1 - Doorbell(db) write 101 * 0 - No doorbell write 102 * 6:2 Reserved 103 * 1:0 00 - 32 bit access, db address must be aligned to 32bit-boundary 104 * 01 - 16 bit access, db address must be aligned to 16bit-boundary 105 * 10 - 8 bit access, db address must be aligned to 8bit-boundary 106 * 11 - Reserved 107 */ 108 __u8 db_len; 109 __u8 resv3; 110 __le16 resv4; 111 /* 32 bit data written to doorbell address */ 112 __le32 db_data; 113 /* 114 * Special encoded variable 115 * All the fields of sem_cmdX are passed from user and all are ORed 116 * together to form sem_cmd. 117 * 0:11 Semaphore value 118 * 15:12 Reserved 119 * 20:16 Semaphore index 120 * 21 Reserved 121 * 22 Semaphore Sync 122 * 23 Reserved 123 * 26:24 Semaphore command 124 * 28:27 Reserved 125 * 29 Semaphore DMA out bound sync fence 126 * 30 Semaphore DMA in bound sync fence 127 * 31 Enable semaphore command 128 */ 129 __le32 sem_cmd0; 130 __le32 sem_cmd1; 131 __le32 sem_cmd2; 132 __le32 sem_cmd3; 133 } __packed; 134 135 struct dbc_rsp { 136 /* Request ID of the memory handle whose DMA transaction is completed */ 137 __le16 req_id; 138 /* Status of the DMA transaction. 0 : Success otherwise failure */ 139 __le16 status; 140 } __packed; 141 142 inline int get_dbc_req_elem_size(void) 143 { 144 return sizeof(struct dbc_req); 145 } 146 147 inline int get_dbc_rsp_elem_size(void) 148 { 149 return sizeof(struct dbc_rsp); 150 } 151 152 static void free_slice(struct kref *kref) 153 { 154 struct bo_slice *slice = container_of(kref, struct bo_slice, ref_count); 155 156 list_del(&slice->slice); 157 drm_gem_object_put(&slice->bo->base); 158 sg_free_table(slice->sgt); 159 kfree(slice->sgt); 160 kfree(slice->reqs); 161 kfree(slice); 162 } 163 164 static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out, 165 struct sg_table *sgt_in, u64 size, u64 offset) 166 { 167 int total_len, len, nents, offf = 0, offl = 0; 168 struct scatterlist *sg, *sgn, *sgf, *sgl; 169 struct sg_table *sgt; 170 int ret, j; 171 172 /* find out number of relevant nents needed for this mem */ 173 total_len = 0; 174 sgf = NULL; 175 sgl = NULL; 176 nents = 0; 177 178 size = size ? size : PAGE_SIZE; 179 for (sg = sgt_in->sgl; sg; sg = sg_next(sg)) { 180 len = sg_dma_len(sg); 181 182 if (!len) 183 continue; 184 if (offset >= total_len && offset < total_len + len) { 185 sgf = sg; 186 offf = offset - total_len; 187 } 188 if (sgf) 189 nents++; 190 if (offset + size >= total_len && 191 offset + size <= total_len + len) { 192 sgl = sg; 193 offl = offset + size - total_len; 194 break; 195 } 196 total_len += len; 197 } 198 199 if (!sgf || !sgl) { 200 ret = -EINVAL; 201 goto out; 202 } 203 204 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); 205 if (!sgt) { 206 ret = -ENOMEM; 207 goto out; 208 } 209 210 ret = sg_alloc_table(sgt, nents, GFP_KERNEL); 211 if (ret) 212 goto free_sgt; 213 214 /* copy relevant sg node and fix page and length */ 215 sgn = sgf; 216 for_each_sgtable_sg(sgt, sg, j) { 217 memcpy(sg, sgn, sizeof(*sg)); 218 if (sgn == sgf) { 219 sg_dma_address(sg) += offf; 220 sg_dma_len(sg) -= offf; 221 sg_set_page(sg, sg_page(sgn), sg_dma_len(sg), offf); 222 } else { 223 offf = 0; 224 } 225 if (sgn == sgl) { 226 sg_dma_len(sg) = offl - offf; 227 sg_set_page(sg, sg_page(sgn), offl - offf, offf); 228 sg_mark_end(sg); 229 break; 230 } 231 sgn = sg_next(sgn); 232 } 233 234 *sgt_out = sgt; 235 return ret; 236 237 free_sgt: 238 kfree(sgt); 239 out: 240 *sgt_out = NULL; 241 return ret; 242 } 243 244 static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice, 245 struct qaic_attach_slice_entry *req) 246 { 247 __le64 db_addr = cpu_to_le64(req->db_addr); 248 __le32 db_data = cpu_to_le32(req->db_data); 249 struct scatterlist *sg; 250 __u8 cmd = BULK_XFER; 251 int presync_sem; 252 u64 dev_addr; 253 __u8 db_len; 254 int i; 255 256 if (!slice->no_xfer) 257 cmd |= (slice->dir == DMA_TO_DEVICE ? INBOUND_XFER : OUTBOUND_XFER); 258 259 if (req->db_len && !IS_ALIGNED(req->db_addr, req->db_len / 8)) 260 return -EINVAL; 261 262 presync_sem = req->sem0.presync + req->sem1.presync + req->sem2.presync + req->sem3.presync; 263 if (presync_sem > 1) 264 return -EINVAL; 265 266 presync_sem = req->sem0.presync << 0 | req->sem1.presync << 1 | 267 req->sem2.presync << 2 | req->sem3.presync << 3; 268 269 switch (req->db_len) { 270 case 32: 271 db_len = BIT(7); 272 break; 273 case 16: 274 db_len = BIT(7) | 1; 275 break; 276 case 8: 277 db_len = BIT(7) | 2; 278 break; 279 case 0: 280 db_len = 0; /* doorbell is not active for this command */ 281 break; 282 default: 283 return -EINVAL; /* should never hit this */ 284 } 285 286 /* 287 * When we end up splitting up a single request (ie a buf slice) into 288 * multiple DMA requests, we have to manage the sync data carefully. 289 * There can only be one presync sem. That needs to be on every xfer 290 * so that the DMA engine doesn't transfer data before the receiver is 291 * ready. We only do the doorbell and postsync sems after the xfer. 292 * To guarantee previous xfers for the request are complete, we use a 293 * fence. 294 */ 295 dev_addr = req->dev_addr; 296 for_each_sgtable_sg(slice->sgt, sg, i) { 297 slice->reqs[i].cmd = cmd; 298 slice->reqs[i].src_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ? 299 sg_dma_address(sg) : dev_addr); 300 slice->reqs[i].dest_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ? 301 dev_addr : sg_dma_address(sg)); 302 /* 303 * sg_dma_len(sg) returns size of a DMA segment, maximum DMA 304 * segment size is set to UINT_MAX by qaic and hence return 305 * values of sg_dma_len(sg) can never exceed u32 range. So, 306 * by down sizing we are not corrupting the value. 307 */ 308 slice->reqs[i].len = cpu_to_le32((u32)sg_dma_len(sg)); 309 switch (presync_sem) { 310 case BIT(0): 311 slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val, 312 req->sem0.index, 313 req->sem0.presync, 314 req->sem0.cmd, 315 req->sem0.flags)); 316 break; 317 case BIT(1): 318 slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val, 319 req->sem1.index, 320 req->sem1.presync, 321 req->sem1.cmd, 322 req->sem1.flags)); 323 break; 324 case BIT(2): 325 slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val, 326 req->sem2.index, 327 req->sem2.presync, 328 req->sem2.cmd, 329 req->sem2.flags)); 330 break; 331 case BIT(3): 332 slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val, 333 req->sem3.index, 334 req->sem3.presync, 335 req->sem3.cmd, 336 req->sem3.flags)); 337 break; 338 } 339 dev_addr += sg_dma_len(sg); 340 } 341 /* add post transfer stuff to last segment */ 342 i--; 343 slice->reqs[i].cmd |= GEN_COMPLETION; 344 slice->reqs[i].db_addr = db_addr; 345 slice->reqs[i].db_len = db_len; 346 slice->reqs[i].db_data = db_data; 347 /* 348 * Add a fence if we have more than one request going to the hardware 349 * representing the entirety of the user request, and the user request 350 * has no presync condition. 351 * Fences are expensive, so we try to avoid them. We rely on the 352 * hardware behavior to avoid needing one when there is a presync 353 * condition. When a presync exists, all requests for that same 354 * presync will be queued into a fifo. Thus, since we queue the 355 * post xfer activity only on the last request we queue, the hardware 356 * will ensure that the last queued request is processed last, thus 357 * making sure the post xfer activity happens at the right time without 358 * a fence. 359 */ 360 if (i && !presync_sem) 361 req->sem0.flags |= (slice->dir == DMA_TO_DEVICE ? 362 QAIC_SEM_INSYNCFENCE : QAIC_SEM_OUTSYNCFENCE); 363 slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val, req->sem0.index, 364 req->sem0.presync, req->sem0.cmd, 365 req->sem0.flags)); 366 slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val, req->sem1.index, 367 req->sem1.presync, req->sem1.cmd, 368 req->sem1.flags)); 369 slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val, req->sem2.index, 370 req->sem2.presync, req->sem2.cmd, 371 req->sem2.flags)); 372 slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val, req->sem3.index, 373 req->sem3.presync, req->sem3.cmd, 374 req->sem3.flags)); 375 376 return 0; 377 } 378 379 static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo, 380 struct qaic_attach_slice_entry *slice_ent) 381 { 382 struct sg_table *sgt = NULL; 383 struct bo_slice *slice; 384 int ret; 385 386 ret = clone_range_of_sgt_for_slice(qdev, &sgt, bo->sgt, slice_ent->size, slice_ent->offset); 387 if (ret) 388 goto out; 389 390 slice = kmalloc(sizeof(*slice), GFP_KERNEL); 391 if (!slice) { 392 ret = -ENOMEM; 393 goto free_sgt; 394 } 395 396 slice->reqs = kcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL); 397 if (!slice->reqs) { 398 ret = -ENOMEM; 399 goto free_slice; 400 } 401 402 slice->no_xfer = !slice_ent->size; 403 slice->sgt = sgt; 404 slice->nents = sgt->nents; 405 slice->dir = bo->dir; 406 slice->bo = bo; 407 slice->size = slice_ent->size; 408 slice->offset = slice_ent->offset; 409 410 ret = encode_reqs(qdev, slice, slice_ent); 411 if (ret) 412 goto free_req; 413 414 bo->total_slice_nents += sgt->nents; 415 kref_init(&slice->ref_count); 416 drm_gem_object_get(&bo->base); 417 list_add_tail(&slice->slice, &bo->slices); 418 419 return 0; 420 421 free_req: 422 kfree(slice->reqs); 423 free_slice: 424 kfree(slice); 425 free_sgt: 426 sg_free_table(sgt); 427 kfree(sgt); 428 out: 429 return ret; 430 } 431 432 static int create_sgt(struct qaic_device *qdev, struct sg_table **sgt_out, u64 size) 433 { 434 struct scatterlist *sg; 435 struct sg_table *sgt; 436 struct page **pages; 437 int *pages_order; 438 int buf_extra; 439 int max_order; 440 int nr_pages; 441 int ret = 0; 442 int i, j, k; 443 int order; 444 445 if (size) { 446 nr_pages = DIV_ROUND_UP(size, PAGE_SIZE); 447 /* 448 * calculate how much extra we are going to allocate, to remove 449 * later 450 */ 451 buf_extra = (PAGE_SIZE - size % PAGE_SIZE) % PAGE_SIZE; 452 max_order = min(MAX_ORDER - 1, get_order(size)); 453 } else { 454 /* allocate a single page for book keeping */ 455 nr_pages = 1; 456 buf_extra = 0; 457 max_order = 0; 458 } 459 460 pages = kvmalloc_array(nr_pages, sizeof(*pages) + sizeof(*pages_order), GFP_KERNEL); 461 if (!pages) { 462 ret = -ENOMEM; 463 goto out; 464 } 465 pages_order = (void *)pages + sizeof(*pages) * nr_pages; 466 467 /* 468 * Allocate requested memory using alloc_pages. It is possible to allocate 469 * the requested memory in multiple chunks by calling alloc_pages 470 * multiple times. Use SG table to handle multiple allocated pages. 471 */ 472 i = 0; 473 while (nr_pages > 0) { 474 order = min(get_order(nr_pages * PAGE_SIZE), max_order); 475 while (1) { 476 pages[i] = alloc_pages(GFP_KERNEL | GFP_HIGHUSER | 477 __GFP_NOWARN | __GFP_ZERO | 478 (order ? __GFP_NORETRY : __GFP_RETRY_MAYFAIL), 479 order); 480 if (pages[i]) 481 break; 482 if (!order--) { 483 ret = -ENOMEM; 484 goto free_partial_alloc; 485 } 486 } 487 488 max_order = order; 489 pages_order[i] = order; 490 491 nr_pages -= 1 << order; 492 if (nr_pages <= 0) 493 /* account for over allocation */ 494 buf_extra += abs(nr_pages) * PAGE_SIZE; 495 i++; 496 } 497 498 sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); 499 if (!sgt) { 500 ret = -ENOMEM; 501 goto free_partial_alloc; 502 } 503 504 if (sg_alloc_table(sgt, i, GFP_KERNEL)) { 505 ret = -ENOMEM; 506 goto free_sgt; 507 } 508 509 /* Populate the SG table with the allocated memory pages */ 510 sg = sgt->sgl; 511 for (k = 0; k < i; k++, sg = sg_next(sg)) { 512 /* Last entry requires special handling */ 513 if (k < i - 1) { 514 sg_set_page(sg, pages[k], PAGE_SIZE << pages_order[k], 0); 515 } else { 516 sg_set_page(sg, pages[k], (PAGE_SIZE << pages_order[k]) - buf_extra, 0); 517 sg_mark_end(sg); 518 } 519 } 520 521 kvfree(pages); 522 *sgt_out = sgt; 523 return ret; 524 525 free_sgt: 526 kfree(sgt); 527 free_partial_alloc: 528 for (j = 0; j < i; j++) 529 __free_pages(pages[j], pages_order[j]); 530 kvfree(pages); 531 out: 532 *sgt_out = NULL; 533 return ret; 534 } 535 536 static bool invalid_sem(struct qaic_sem *sem) 537 { 538 if (sem->val & ~SEM_VAL_MASK || sem->index & ~SEM_INDEX_MASK || 539 !(sem->presync == 0 || sem->presync == 1) || sem->pad || 540 sem->flags & ~(QAIC_SEM_INSYNCFENCE | QAIC_SEM_OUTSYNCFENCE) || 541 sem->cmd > QAIC_SEM_WAIT_GT_0) 542 return true; 543 return false; 544 } 545 546 static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent, 547 u32 count, u64 total_size) 548 { 549 int i; 550 551 for (i = 0; i < count; i++) { 552 if (!(slice_ent[i].db_len == 32 || slice_ent[i].db_len == 16 || 553 slice_ent[i].db_len == 8 || slice_ent[i].db_len == 0) || 554 invalid_sem(&slice_ent[i].sem0) || invalid_sem(&slice_ent[i].sem1) || 555 invalid_sem(&slice_ent[i].sem2) || invalid_sem(&slice_ent[i].sem3)) 556 return -EINVAL; 557 558 if (slice_ent[i].offset + slice_ent[i].size > total_size) 559 return -EINVAL; 560 } 561 562 return 0; 563 } 564 565 static void qaic_free_sgt(struct sg_table *sgt) 566 { 567 struct scatterlist *sg; 568 569 for (sg = sgt->sgl; sg; sg = sg_next(sg)) 570 if (sg_page(sg)) 571 __free_pages(sg_page(sg), get_order(sg->length)); 572 sg_free_table(sgt); 573 kfree(sgt); 574 } 575 576 static void qaic_gem_print_info(struct drm_printer *p, unsigned int indent, 577 const struct drm_gem_object *obj) 578 { 579 struct qaic_bo *bo = to_qaic_bo(obj); 580 581 drm_printf_indent(p, indent, "user requested size=%llu\n", bo->size); 582 } 583 584 static const struct vm_operations_struct drm_vm_ops = { 585 .open = drm_gem_vm_open, 586 .close = drm_gem_vm_close, 587 }; 588 589 static int qaic_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) 590 { 591 struct qaic_bo *bo = to_qaic_bo(obj); 592 unsigned long offset = 0; 593 struct scatterlist *sg; 594 int ret = 0; 595 596 if (obj->import_attach) 597 return -EINVAL; 598 599 for (sg = bo->sgt->sgl; sg; sg = sg_next(sg)) { 600 if (sg_page(sg)) { 601 ret = remap_pfn_range(vma, vma->vm_start + offset, page_to_pfn(sg_page(sg)), 602 sg->length, vma->vm_page_prot); 603 if (ret) 604 goto out; 605 offset += sg->length; 606 } 607 } 608 609 out: 610 return ret; 611 } 612 613 static void qaic_free_object(struct drm_gem_object *obj) 614 { 615 struct qaic_bo *bo = to_qaic_bo(obj); 616 617 if (obj->import_attach) { 618 /* DMABUF/PRIME Path */ 619 dma_buf_detach(obj->import_attach->dmabuf, obj->import_attach); 620 dma_buf_put(obj->import_attach->dmabuf); 621 } else { 622 /* Private buffer allocation path */ 623 qaic_free_sgt(bo->sgt); 624 } 625 626 drm_gem_object_release(obj); 627 kfree(bo); 628 } 629 630 static const struct drm_gem_object_funcs qaic_gem_funcs = { 631 .free = qaic_free_object, 632 .print_info = qaic_gem_print_info, 633 .mmap = qaic_gem_object_mmap, 634 .vm_ops = &drm_vm_ops, 635 }; 636 637 static struct qaic_bo *qaic_alloc_init_bo(void) 638 { 639 struct qaic_bo *bo; 640 641 bo = kzalloc(sizeof(*bo), GFP_KERNEL); 642 if (!bo) 643 return ERR_PTR(-ENOMEM); 644 645 INIT_LIST_HEAD(&bo->slices); 646 init_completion(&bo->xfer_done); 647 complete_all(&bo->xfer_done); 648 649 return bo; 650 } 651 652 int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) 653 { 654 struct qaic_create_bo *args = data; 655 int usr_rcu_id, qdev_rcu_id; 656 struct drm_gem_object *obj; 657 struct qaic_device *qdev; 658 struct qaic_user *usr; 659 struct qaic_bo *bo; 660 size_t size; 661 int ret; 662 663 if (args->pad) 664 return -EINVAL; 665 666 size = PAGE_ALIGN(args->size); 667 if (size == 0) 668 return -EINVAL; 669 670 usr = file_priv->driver_priv; 671 usr_rcu_id = srcu_read_lock(&usr->qddev_lock); 672 if (!usr->qddev) { 673 ret = -ENODEV; 674 goto unlock_usr_srcu; 675 } 676 677 qdev = usr->qddev->qdev; 678 qdev_rcu_id = srcu_read_lock(&qdev->dev_lock); 679 if (qdev->in_reset) { 680 ret = -ENODEV; 681 goto unlock_dev_srcu; 682 } 683 684 bo = qaic_alloc_init_bo(); 685 if (IS_ERR(bo)) { 686 ret = PTR_ERR(bo); 687 goto unlock_dev_srcu; 688 } 689 obj = &bo->base; 690 691 drm_gem_private_object_init(dev, obj, size); 692 693 obj->funcs = &qaic_gem_funcs; 694 ret = create_sgt(qdev, &bo->sgt, size); 695 if (ret) 696 goto free_bo; 697 698 bo->size = args->size; 699 700 ret = drm_gem_handle_create(file_priv, obj, &args->handle); 701 if (ret) 702 goto free_sgt; 703 704 bo->handle = args->handle; 705 drm_gem_object_put(obj); 706 srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id); 707 srcu_read_unlock(&usr->qddev_lock, usr_rcu_id); 708 709 return 0; 710 711 free_sgt: 712 qaic_free_sgt(bo->sgt); 713 free_bo: 714 kfree(bo); 715 unlock_dev_srcu: 716 srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id); 717 unlock_usr_srcu: 718 srcu_read_unlock(&usr->qddev_lock, usr_rcu_id); 719 return ret; 720 } 721 722 int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) 723 { 724 struct qaic_mmap_bo *args = data; 725 int usr_rcu_id, qdev_rcu_id; 726 struct drm_gem_object *obj; 727 struct qaic_device *qdev; 728 struct qaic_user *usr; 729 int ret; 730 731 usr = file_priv->driver_priv; 732 usr_rcu_id = srcu_read_lock(&usr->qddev_lock); 733 if (!usr->qddev) { 734 ret = -ENODEV; 735 goto unlock_usr_srcu; 736 } 737 738 qdev = usr->qddev->qdev; 739 qdev_rcu_id = srcu_read_lock(&qdev->dev_lock); 740 if (qdev->in_reset) { 741 ret = -ENODEV; 742 goto unlock_dev_srcu; 743 } 744 745 obj = drm_gem_object_lookup(file_priv, args->handle); 746 if (!obj) { 747 ret = -ENOENT; 748 goto unlock_dev_srcu; 749 } 750 751 ret = drm_gem_create_mmap_offset(obj); 752 if (ret == 0) 753 args->offset = drm_vma_node_offset_addr(&obj->vma_node); 754 755 drm_gem_object_put(obj); 756 757 unlock_dev_srcu: 758 srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id); 759 unlock_usr_srcu: 760 srcu_read_unlock(&usr->qddev_lock, usr_rcu_id); 761 return ret; 762 } 763 764 struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf) 765 { 766 struct dma_buf_attachment *attach; 767 struct drm_gem_object *obj; 768 struct qaic_bo *bo; 769 size_t size; 770 int ret; 771 772 bo = qaic_alloc_init_bo(); 773 if (IS_ERR(bo)) { 774 ret = PTR_ERR(bo); 775 goto out; 776 } 777 778 obj = &bo->base; 779 get_dma_buf(dma_buf); 780 781 attach = dma_buf_attach(dma_buf, dev->dev); 782 if (IS_ERR(attach)) { 783 ret = PTR_ERR(attach); 784 goto attach_fail; 785 } 786 787 size = PAGE_ALIGN(attach->dmabuf->size); 788 if (size == 0) { 789 ret = -EINVAL; 790 goto size_align_fail; 791 } 792 793 drm_gem_private_object_init(dev, obj, size); 794 /* 795 * skipping dma_buf_map_attachment() as we do not know the direction 796 * just yet. Once the direction is known in the subsequent IOCTL to 797 * attach slicing, we can do it then. 798 */ 799 800 obj->funcs = &qaic_gem_funcs; 801 obj->import_attach = attach; 802 obj->resv = dma_buf->resv; 803 804 return obj; 805 806 size_align_fail: 807 dma_buf_detach(dma_buf, attach); 808 attach_fail: 809 dma_buf_put(dma_buf); 810 kfree(bo); 811 out: 812 return ERR_PTR(ret); 813 } 814 815 static int qaic_prepare_import_bo(struct qaic_bo *bo, struct qaic_attach_slice_hdr *hdr) 816 { 817 struct drm_gem_object *obj = &bo->base; 818 struct sg_table *sgt; 819 int ret; 820 821 if (obj->import_attach->dmabuf->size < hdr->size) 822 return -EINVAL; 823 824 sgt = dma_buf_map_attachment(obj->import_attach, hdr->dir); 825 if (IS_ERR(sgt)) { 826 ret = PTR_ERR(sgt); 827 return ret; 828 } 829 830 bo->sgt = sgt; 831 bo->size = hdr->size; 832 833 return 0; 834 } 835 836 static int qaic_prepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo, 837 struct qaic_attach_slice_hdr *hdr) 838 { 839 int ret; 840 841 if (bo->size != hdr->size) 842 return -EINVAL; 843 844 ret = dma_map_sgtable(&qdev->pdev->dev, bo->sgt, hdr->dir, 0); 845 if (ret) 846 return -EFAULT; 847 848 return 0; 849 } 850 851 static int qaic_prepare_bo(struct qaic_device *qdev, struct qaic_bo *bo, 852 struct qaic_attach_slice_hdr *hdr) 853 { 854 int ret; 855 856 if (bo->base.import_attach) 857 ret = qaic_prepare_import_bo(bo, hdr); 858 else 859 ret = qaic_prepare_export_bo(qdev, bo, hdr); 860 861 if (ret == 0) 862 bo->dir = hdr->dir; 863 864 return ret; 865 } 866 867 static void qaic_unprepare_import_bo(struct qaic_bo *bo) 868 { 869 dma_buf_unmap_attachment(bo->base.import_attach, bo->sgt, bo->dir); 870 bo->sgt = NULL; 871 bo->size = 0; 872 } 873 874 static void qaic_unprepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo) 875 { 876 dma_unmap_sgtable(&qdev->pdev->dev, bo->sgt, bo->dir, 0); 877 } 878 879 static void qaic_unprepare_bo(struct qaic_device *qdev, struct qaic_bo *bo) 880 { 881 if (bo->base.import_attach) 882 qaic_unprepare_import_bo(bo); 883 else 884 qaic_unprepare_export_bo(qdev, bo); 885 886 bo->dir = 0; 887 } 888 889 static void qaic_free_slices_bo(struct qaic_bo *bo) 890 { 891 struct bo_slice *slice, *temp; 892 893 list_for_each_entry_safe(slice, temp, &bo->slices, slice) 894 kref_put(&slice->ref_count, free_slice); 895 } 896 897 static int qaic_attach_slicing_bo(struct qaic_device *qdev, struct qaic_bo *bo, 898 struct qaic_attach_slice_hdr *hdr, 899 struct qaic_attach_slice_entry *slice_ent) 900 { 901 int ret, i; 902 903 for (i = 0; i < hdr->count; i++) { 904 ret = qaic_map_one_slice(qdev, bo, &slice_ent[i]); 905 if (ret) { 906 qaic_free_slices_bo(bo); 907 return ret; 908 } 909 } 910 911 if (bo->total_slice_nents > qdev->dbc[hdr->dbc_id].nelem) { 912 qaic_free_slices_bo(bo); 913 return -ENOSPC; 914 } 915 916 bo->sliced = true; 917 bo->nr_slice = hdr->count; 918 list_add_tail(&bo->bo_list, &qdev->dbc[hdr->dbc_id].bo_lists); 919 920 return 0; 921 } 922 923 int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) 924 { 925 struct qaic_attach_slice_entry *slice_ent; 926 struct qaic_attach_slice *args = data; 927 int rcu_id, usr_rcu_id, qdev_rcu_id; 928 struct dma_bridge_chan *dbc; 929 struct drm_gem_object *obj; 930 struct qaic_device *qdev; 931 unsigned long arg_size; 932 struct qaic_user *usr; 933 u8 __user *user_data; 934 struct qaic_bo *bo; 935 int ret; 936 937 if (args->hdr.count == 0) 938 return -EINVAL; 939 940 arg_size = args->hdr.count * sizeof(*slice_ent); 941 if (arg_size / args->hdr.count != sizeof(*slice_ent)) 942 return -EINVAL; 943 944 if (args->hdr.size == 0) 945 return -EINVAL; 946 947 if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE)) 948 return -EINVAL; 949 950 if (args->data == 0) 951 return -EINVAL; 952 953 usr = file_priv->driver_priv; 954 usr_rcu_id = srcu_read_lock(&usr->qddev_lock); 955 if (!usr->qddev) { 956 ret = -ENODEV; 957 goto unlock_usr_srcu; 958 } 959 960 qdev = usr->qddev->qdev; 961 qdev_rcu_id = srcu_read_lock(&qdev->dev_lock); 962 if (qdev->in_reset) { 963 ret = -ENODEV; 964 goto unlock_dev_srcu; 965 } 966 967 if (args->hdr.dbc_id >= qdev->num_dbc) { 968 ret = -EINVAL; 969 goto unlock_dev_srcu; 970 } 971 972 user_data = u64_to_user_ptr(args->data); 973 974 slice_ent = kzalloc(arg_size, GFP_KERNEL); 975 if (!slice_ent) { 976 ret = -EINVAL; 977 goto unlock_dev_srcu; 978 } 979 980 ret = copy_from_user(slice_ent, user_data, arg_size); 981 if (ret) { 982 ret = -EFAULT; 983 goto free_slice_ent; 984 } 985 986 ret = qaic_validate_req(qdev, slice_ent, args->hdr.count, args->hdr.size); 987 if (ret) 988 goto free_slice_ent; 989 990 obj = drm_gem_object_lookup(file_priv, args->hdr.handle); 991 if (!obj) { 992 ret = -ENOENT; 993 goto free_slice_ent; 994 } 995 996 bo = to_qaic_bo(obj); 997 998 if (bo->sliced) { 999 ret = -EINVAL; 1000 goto put_bo; 1001 } 1002 1003 dbc = &qdev->dbc[args->hdr.dbc_id]; 1004 rcu_id = srcu_read_lock(&dbc->ch_lock); 1005 if (dbc->usr != usr) { 1006 ret = -EINVAL; 1007 goto unlock_ch_srcu; 1008 } 1009 1010 ret = qaic_prepare_bo(qdev, bo, &args->hdr); 1011 if (ret) 1012 goto unlock_ch_srcu; 1013 1014 ret = qaic_attach_slicing_bo(qdev, bo, &args->hdr, slice_ent); 1015 if (ret) 1016 goto unprepare_bo; 1017 1018 if (args->hdr.dir == DMA_TO_DEVICE) 1019 dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, args->hdr.dir); 1020 1021 bo->dbc = dbc; 1022 srcu_read_unlock(&dbc->ch_lock, rcu_id); 1023 drm_gem_object_put(obj); 1024 srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id); 1025 srcu_read_unlock(&usr->qddev_lock, usr_rcu_id); 1026 1027 return 0; 1028 1029 unprepare_bo: 1030 qaic_unprepare_bo(qdev, bo); 1031 unlock_ch_srcu: 1032 srcu_read_unlock(&dbc->ch_lock, rcu_id); 1033 put_bo: 1034 drm_gem_object_put(obj); 1035 free_slice_ent: 1036 kfree(slice_ent); 1037 unlock_dev_srcu: 1038 srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id); 1039 unlock_usr_srcu: 1040 srcu_read_unlock(&usr->qddev_lock, usr_rcu_id); 1041 return ret; 1042 } 1043 1044 static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice, u32 dbc_id, 1045 u32 head, u32 *ptail) 1046 { 1047 struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id]; 1048 struct dbc_req *reqs = slice->reqs; 1049 u32 tail = *ptail; 1050 u32 avail; 1051 1052 avail = head - tail; 1053 if (head <= tail) 1054 avail += dbc->nelem; 1055 1056 --avail; 1057 1058 if (avail < slice->nents) 1059 return -EAGAIN; 1060 1061 if (tail + slice->nents > dbc->nelem) { 1062 avail = dbc->nelem - tail; 1063 avail = min_t(u32, avail, slice->nents); 1064 memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs, 1065 sizeof(*reqs) * avail); 1066 reqs += avail; 1067 avail = slice->nents - avail; 1068 if (avail) 1069 memcpy(dbc->req_q_base, reqs, sizeof(*reqs) * avail); 1070 } else { 1071 memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs, 1072 sizeof(*reqs) * slice->nents); 1073 } 1074 1075 *ptail = (tail + slice->nents) % dbc->nelem; 1076 1077 return 0; 1078 } 1079 1080 /* 1081 * Based on the value of resize we may only need to transmit first_n 1082 * entries and the last entry, with last_bytes to send from the last entry. 1083 * Note that first_n could be 0. 1084 */ 1085 static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice, 1086 u64 resize, u32 dbc_id, u32 head, u32 *ptail) 1087 { 1088 struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id]; 1089 struct dbc_req *reqs = slice->reqs; 1090 struct dbc_req *last_req; 1091 u32 tail = *ptail; 1092 u64 total_bytes; 1093 u64 last_bytes; 1094 u32 first_n; 1095 u32 avail; 1096 int ret; 1097 int i; 1098 1099 avail = head - tail; 1100 if (head <= tail) 1101 avail += dbc->nelem; 1102 1103 --avail; 1104 1105 total_bytes = 0; 1106 for (i = 0; i < slice->nents; i++) { 1107 total_bytes += le32_to_cpu(reqs[i].len); 1108 if (total_bytes >= resize) 1109 break; 1110 } 1111 1112 if (total_bytes < resize) { 1113 /* User space should have used the full buffer path. */ 1114 ret = -EINVAL; 1115 return ret; 1116 } 1117 1118 first_n = i; 1119 last_bytes = i ? resize + le32_to_cpu(reqs[i].len) - total_bytes : resize; 1120 1121 if (avail < (first_n + 1)) 1122 return -EAGAIN; 1123 1124 if (first_n) { 1125 if (tail + first_n > dbc->nelem) { 1126 avail = dbc->nelem - tail; 1127 avail = min_t(u32, avail, first_n); 1128 memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs, 1129 sizeof(*reqs) * avail); 1130 last_req = reqs + avail; 1131 avail = first_n - avail; 1132 if (avail) 1133 memcpy(dbc->req_q_base, last_req, sizeof(*reqs) * avail); 1134 } else { 1135 memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs, 1136 sizeof(*reqs) * first_n); 1137 } 1138 } 1139 1140 /* Copy over the last entry. Here we need to adjust len to the left over 1141 * size, and set src and dst to the entry it is copied to. 1142 */ 1143 last_req = dbc->req_q_base + (tail + first_n) % dbc->nelem * get_dbc_req_elem_size(); 1144 memcpy(last_req, reqs + slice->nents - 1, sizeof(*reqs)); 1145 1146 /* 1147 * last_bytes holds size of a DMA segment, maximum DMA segment size is 1148 * set to UINT_MAX by qaic and hence last_bytes can never exceed u32 1149 * range. So, by down sizing we are not corrupting the value. 1150 */ 1151 last_req->len = cpu_to_le32((u32)last_bytes); 1152 last_req->src_addr = reqs[first_n].src_addr; 1153 last_req->dest_addr = reqs[first_n].dest_addr; 1154 1155 *ptail = (tail + first_n + 1) % dbc->nelem; 1156 1157 return 0; 1158 } 1159 1160 static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *file_priv, 1161 struct qaic_execute_entry *exec, unsigned int count, 1162 bool is_partial, struct dma_bridge_chan *dbc, u32 head, 1163 u32 *tail) 1164 { 1165 struct qaic_partial_execute_entry *pexec = (struct qaic_partial_execute_entry *)exec; 1166 struct drm_gem_object *obj; 1167 struct bo_slice *slice; 1168 unsigned long flags; 1169 struct qaic_bo *bo; 1170 bool queued; 1171 int i, j; 1172 int ret; 1173 1174 for (i = 0; i < count; i++) { 1175 /* 1176 * ref count will be decremented when the transfer of this 1177 * buffer is complete. It is inside dbc_irq_threaded_fn(). 1178 */ 1179 obj = drm_gem_object_lookup(file_priv, 1180 is_partial ? pexec[i].handle : exec[i].handle); 1181 if (!obj) { 1182 ret = -ENOENT; 1183 goto failed_to_send_bo; 1184 } 1185 1186 bo = to_qaic_bo(obj); 1187 1188 if (!bo->sliced) { 1189 ret = -EINVAL; 1190 goto failed_to_send_bo; 1191 } 1192 1193 if (is_partial && pexec[i].resize > bo->size) { 1194 ret = -EINVAL; 1195 goto failed_to_send_bo; 1196 } 1197 1198 spin_lock_irqsave(&dbc->xfer_lock, flags); 1199 queued = bo->queued; 1200 bo->queued = true; 1201 if (queued) { 1202 spin_unlock_irqrestore(&dbc->xfer_lock, flags); 1203 ret = -EINVAL; 1204 goto failed_to_send_bo; 1205 } 1206 1207 bo->req_id = dbc->next_req_id++; 1208 1209 list_for_each_entry(slice, &bo->slices, slice) { 1210 /* 1211 * If this slice does not fall under the given 1212 * resize then skip this slice and continue the loop 1213 */ 1214 if (is_partial && pexec[i].resize && pexec[i].resize <= slice->offset) 1215 continue; 1216 1217 for (j = 0; j < slice->nents; j++) 1218 slice->reqs[j].req_id = cpu_to_le16(bo->req_id); 1219 1220 /* 1221 * If it is a partial execute ioctl call then check if 1222 * resize has cut this slice short then do a partial copy 1223 * else do complete copy 1224 */ 1225 if (is_partial && pexec[i].resize && 1226 pexec[i].resize < slice->offset + slice->size) 1227 ret = copy_partial_exec_reqs(qdev, slice, 1228 pexec[i].resize - slice->offset, 1229 dbc->id, head, tail); 1230 else 1231 ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail); 1232 if (ret) { 1233 bo->queued = false; 1234 spin_unlock_irqrestore(&dbc->xfer_lock, flags); 1235 goto failed_to_send_bo; 1236 } 1237 } 1238 reinit_completion(&bo->xfer_done); 1239 list_add_tail(&bo->xfer_list, &dbc->xfer_list); 1240 spin_unlock_irqrestore(&dbc->xfer_lock, flags); 1241 dma_sync_sgtable_for_device(&qdev->pdev->dev, bo->sgt, bo->dir); 1242 } 1243 1244 return 0; 1245 1246 failed_to_send_bo: 1247 if (likely(obj)) 1248 drm_gem_object_put(obj); 1249 for (j = 0; j < i; j++) { 1250 spin_lock_irqsave(&dbc->xfer_lock, flags); 1251 bo = list_last_entry(&dbc->xfer_list, struct qaic_bo, xfer_list); 1252 obj = &bo->base; 1253 bo->queued = false; 1254 list_del(&bo->xfer_list); 1255 spin_unlock_irqrestore(&dbc->xfer_lock, flags); 1256 dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir); 1257 drm_gem_object_put(obj); 1258 } 1259 return ret; 1260 } 1261 1262 static void update_profiling_data(struct drm_file *file_priv, 1263 struct qaic_execute_entry *exec, unsigned int count, 1264 bool is_partial, u64 received_ts, u64 submit_ts, u32 queue_level) 1265 { 1266 struct qaic_partial_execute_entry *pexec = (struct qaic_partial_execute_entry *)exec; 1267 struct drm_gem_object *obj; 1268 struct qaic_bo *bo; 1269 int i; 1270 1271 for (i = 0; i < count; i++) { 1272 /* 1273 * Since we already committed the BO to hardware, the only way 1274 * this should fail is a pending signal. We can't cancel the 1275 * submit to hardware, so we have to just skip the profiling 1276 * data. In case the signal is not fatal to the process, we 1277 * return success so that the user doesn't try to resubmit. 1278 */ 1279 obj = drm_gem_object_lookup(file_priv, 1280 is_partial ? pexec[i].handle : exec[i].handle); 1281 if (!obj) 1282 break; 1283 bo = to_qaic_bo(obj); 1284 bo->perf_stats.req_received_ts = received_ts; 1285 bo->perf_stats.req_submit_ts = submit_ts; 1286 bo->perf_stats.queue_level_before = queue_level; 1287 queue_level += bo->total_slice_nents; 1288 drm_gem_object_put(obj); 1289 } 1290 } 1291 1292 static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv, 1293 bool is_partial) 1294 { 1295 struct qaic_partial_execute_entry *pexec; 1296 struct qaic_execute *args = data; 1297 struct qaic_execute_entry *exec; 1298 struct dma_bridge_chan *dbc; 1299 int usr_rcu_id, qdev_rcu_id; 1300 struct qaic_device *qdev; 1301 struct qaic_user *usr; 1302 u8 __user *user_data; 1303 unsigned long n; 1304 u64 received_ts; 1305 u32 queue_level; 1306 u64 submit_ts; 1307 int rcu_id; 1308 u32 head; 1309 u32 tail; 1310 u64 size; 1311 int ret; 1312 1313 received_ts = ktime_get_ns(); 1314 1315 size = is_partial ? sizeof(*pexec) : sizeof(*exec); 1316 n = (unsigned long)size * args->hdr.count; 1317 if (args->hdr.count == 0 || n / args->hdr.count != size) 1318 return -EINVAL; 1319 1320 user_data = u64_to_user_ptr(args->data); 1321 1322 exec = kcalloc(args->hdr.count, size, GFP_KERNEL); 1323 pexec = (struct qaic_partial_execute_entry *)exec; 1324 if (!exec) 1325 return -ENOMEM; 1326 1327 if (copy_from_user(exec, user_data, n)) { 1328 ret = -EFAULT; 1329 goto free_exec; 1330 } 1331 1332 usr = file_priv->driver_priv; 1333 usr_rcu_id = srcu_read_lock(&usr->qddev_lock); 1334 if (!usr->qddev) { 1335 ret = -ENODEV; 1336 goto unlock_usr_srcu; 1337 } 1338 1339 qdev = usr->qddev->qdev; 1340 qdev_rcu_id = srcu_read_lock(&qdev->dev_lock); 1341 if (qdev->in_reset) { 1342 ret = -ENODEV; 1343 goto unlock_dev_srcu; 1344 } 1345 1346 if (args->hdr.dbc_id >= qdev->num_dbc) { 1347 ret = -EINVAL; 1348 goto unlock_dev_srcu; 1349 } 1350 1351 dbc = &qdev->dbc[args->hdr.dbc_id]; 1352 1353 rcu_id = srcu_read_lock(&dbc->ch_lock); 1354 if (!dbc->usr || dbc->usr->handle != usr->handle) { 1355 ret = -EPERM; 1356 goto release_ch_rcu; 1357 } 1358 1359 head = readl(dbc->dbc_base + REQHP_OFF); 1360 tail = readl(dbc->dbc_base + REQTP_OFF); 1361 1362 if (head == U32_MAX || tail == U32_MAX) { 1363 /* PCI link error */ 1364 ret = -ENODEV; 1365 goto release_ch_rcu; 1366 } 1367 1368 queue_level = head <= tail ? tail - head : dbc->nelem - (head - tail); 1369 1370 ret = send_bo_list_to_device(qdev, file_priv, exec, args->hdr.count, is_partial, dbc, 1371 head, &tail); 1372 if (ret) 1373 goto release_ch_rcu; 1374 1375 /* Finalize commit to hardware */ 1376 submit_ts = ktime_get_ns(); 1377 writel(tail, dbc->dbc_base + REQTP_OFF); 1378 1379 update_profiling_data(file_priv, exec, args->hdr.count, is_partial, received_ts, 1380 submit_ts, queue_level); 1381 1382 if (datapath_polling) 1383 schedule_work(&dbc->poll_work); 1384 1385 release_ch_rcu: 1386 srcu_read_unlock(&dbc->ch_lock, rcu_id); 1387 unlock_dev_srcu: 1388 srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id); 1389 unlock_usr_srcu: 1390 srcu_read_unlock(&usr->qddev_lock, usr_rcu_id); 1391 free_exec: 1392 kfree(exec); 1393 return ret; 1394 } 1395 1396 int qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) 1397 { 1398 return __qaic_execute_bo_ioctl(dev, data, file_priv, false); 1399 } 1400 1401 int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) 1402 { 1403 return __qaic_execute_bo_ioctl(dev, data, file_priv, true); 1404 } 1405 1406 /* 1407 * Our interrupt handling is a bit more complicated than a simple ideal, but 1408 * sadly necessary. 1409 * 1410 * Each dbc has a completion queue. Entries in the queue correspond to DMA 1411 * requests which the device has processed. The hardware already has a built 1412 * in irq mitigation. When the device puts an entry into the queue, it will 1413 * only trigger an interrupt if the queue was empty. Therefore, when adding 1414 * the Nth event to a non-empty queue, the hardware doesn't trigger an 1415 * interrupt. This means the host doesn't get additional interrupts signaling 1416 * the same thing - the queue has something to process. 1417 * This behavior can be overridden in the DMA request. 1418 * This means that when the host receives an interrupt, it is required to 1419 * drain the queue. 1420 * 1421 * This behavior is what NAPI attempts to accomplish, although we can't use 1422 * NAPI as we don't have a netdev. We use threaded irqs instead. 1423 * 1424 * However, there is a situation where the host drains the queue fast enough 1425 * that every event causes an interrupt. Typically this is not a problem as 1426 * the rate of events would be low. However, that is not the case with 1427 * lprnet for example. On an Intel Xeon D-2191 where we run 8 instances of 1428 * lprnet, the host receives roughly 80k interrupts per second from the device 1429 * (per /proc/interrupts). While NAPI documentation indicates the host should 1430 * just chug along, sadly that behavior causes instability in some hosts. 1431 * 1432 * Therefore, we implement an interrupt disable scheme similar to NAPI. The 1433 * key difference is that we will delay after draining the queue for a small 1434 * time to allow additional events to come in via polling. Using the above 1435 * lprnet workload, this reduces the number of interrupts processed from 1436 * ~80k/sec to about 64 in 5 minutes and appears to solve the system 1437 * instability. 1438 */ 1439 irqreturn_t dbc_irq_handler(int irq, void *data) 1440 { 1441 struct dma_bridge_chan *dbc = data; 1442 int rcu_id; 1443 u32 head; 1444 u32 tail; 1445 1446 rcu_id = srcu_read_lock(&dbc->ch_lock); 1447 1448 if (!dbc->usr) { 1449 srcu_read_unlock(&dbc->ch_lock, rcu_id); 1450 return IRQ_HANDLED; 1451 } 1452 1453 head = readl(dbc->dbc_base + RSPHP_OFF); 1454 if (head == U32_MAX) { /* PCI link error */ 1455 srcu_read_unlock(&dbc->ch_lock, rcu_id); 1456 return IRQ_NONE; 1457 } 1458 1459 tail = readl(dbc->dbc_base + RSPTP_OFF); 1460 if (tail == U32_MAX) { /* PCI link error */ 1461 srcu_read_unlock(&dbc->ch_lock, rcu_id); 1462 return IRQ_NONE; 1463 } 1464 1465 if (head == tail) { /* queue empty */ 1466 srcu_read_unlock(&dbc->ch_lock, rcu_id); 1467 return IRQ_NONE; 1468 } 1469 1470 disable_irq_nosync(irq); 1471 srcu_read_unlock(&dbc->ch_lock, rcu_id); 1472 return IRQ_WAKE_THREAD; 1473 } 1474 1475 void irq_polling_work(struct work_struct *work) 1476 { 1477 struct dma_bridge_chan *dbc = container_of(work, struct dma_bridge_chan, poll_work); 1478 unsigned long flags; 1479 int rcu_id; 1480 u32 head; 1481 u32 tail; 1482 1483 rcu_id = srcu_read_lock(&dbc->ch_lock); 1484 1485 while (1) { 1486 if (dbc->qdev->in_reset) { 1487 srcu_read_unlock(&dbc->ch_lock, rcu_id); 1488 return; 1489 } 1490 if (!dbc->usr) { 1491 srcu_read_unlock(&dbc->ch_lock, rcu_id); 1492 return; 1493 } 1494 spin_lock_irqsave(&dbc->xfer_lock, flags); 1495 if (list_empty(&dbc->xfer_list)) { 1496 spin_unlock_irqrestore(&dbc->xfer_lock, flags); 1497 srcu_read_unlock(&dbc->ch_lock, rcu_id); 1498 return; 1499 } 1500 spin_unlock_irqrestore(&dbc->xfer_lock, flags); 1501 1502 head = readl(dbc->dbc_base + RSPHP_OFF); 1503 if (head == U32_MAX) { /* PCI link error */ 1504 srcu_read_unlock(&dbc->ch_lock, rcu_id); 1505 return; 1506 } 1507 1508 tail = readl(dbc->dbc_base + RSPTP_OFF); 1509 if (tail == U32_MAX) { /* PCI link error */ 1510 srcu_read_unlock(&dbc->ch_lock, rcu_id); 1511 return; 1512 } 1513 1514 if (head != tail) { 1515 irq_wake_thread(dbc->irq, dbc); 1516 srcu_read_unlock(&dbc->ch_lock, rcu_id); 1517 return; 1518 } 1519 1520 cond_resched(); 1521 usleep_range(datapath_poll_interval_us, 2 * datapath_poll_interval_us); 1522 } 1523 } 1524 1525 irqreturn_t dbc_irq_threaded_fn(int irq, void *data) 1526 { 1527 struct dma_bridge_chan *dbc = data; 1528 int event_count = NUM_EVENTS; 1529 int delay_count = NUM_DELAYS; 1530 struct qaic_device *qdev; 1531 struct qaic_bo *bo, *i; 1532 struct dbc_rsp *rsp; 1533 unsigned long flags; 1534 int rcu_id; 1535 u16 status; 1536 u16 req_id; 1537 u32 head; 1538 u32 tail; 1539 1540 rcu_id = srcu_read_lock(&dbc->ch_lock); 1541 1542 head = readl(dbc->dbc_base + RSPHP_OFF); 1543 if (head == U32_MAX) /* PCI link error */ 1544 goto error_out; 1545 1546 qdev = dbc->qdev; 1547 read_fifo: 1548 1549 if (!event_count) { 1550 event_count = NUM_EVENTS; 1551 cond_resched(); 1552 } 1553 1554 /* 1555 * if this channel isn't assigned or gets unassigned during processing 1556 * we have nothing further to do 1557 */ 1558 if (!dbc->usr) 1559 goto error_out; 1560 1561 tail = readl(dbc->dbc_base + RSPTP_OFF); 1562 if (tail == U32_MAX) /* PCI link error */ 1563 goto error_out; 1564 1565 if (head == tail) { /* queue empty */ 1566 if (delay_count) { 1567 --delay_count; 1568 usleep_range(100, 200); 1569 goto read_fifo; /* check for a new event */ 1570 } 1571 goto normal_out; 1572 } 1573 1574 delay_count = NUM_DELAYS; 1575 while (head != tail) { 1576 if (!event_count) 1577 break; 1578 --event_count; 1579 rsp = dbc->rsp_q_base + head * sizeof(*rsp); 1580 req_id = le16_to_cpu(rsp->req_id); 1581 status = le16_to_cpu(rsp->status); 1582 if (status) 1583 pci_dbg(qdev->pdev, "req_id %d failed with status %d\n", req_id, status); 1584 spin_lock_irqsave(&dbc->xfer_lock, flags); 1585 /* 1586 * A BO can receive multiple interrupts, since a BO can be 1587 * divided into multiple slices and a buffer receives as many 1588 * interrupts as slices. So until it receives interrupts for 1589 * all the slices we cannot mark that buffer complete. 1590 */ 1591 list_for_each_entry_safe(bo, i, &dbc->xfer_list, xfer_list) { 1592 if (bo->req_id == req_id) 1593 bo->nr_slice_xfer_done++; 1594 else 1595 continue; 1596 1597 if (bo->nr_slice_xfer_done < bo->nr_slice) 1598 break; 1599 1600 /* 1601 * At this point we have received all the interrupts for 1602 * BO, which means BO execution is complete. 1603 */ 1604 dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir); 1605 bo->nr_slice_xfer_done = 0; 1606 bo->queued = false; 1607 list_del(&bo->xfer_list); 1608 bo->perf_stats.req_processed_ts = ktime_get_ns(); 1609 complete_all(&bo->xfer_done); 1610 drm_gem_object_put(&bo->base); 1611 break; 1612 } 1613 spin_unlock_irqrestore(&dbc->xfer_lock, flags); 1614 head = (head + 1) % dbc->nelem; 1615 } 1616 1617 /* 1618 * Update the head pointer of response queue and let the device know 1619 * that we have consumed elements from the queue. 1620 */ 1621 writel(head, dbc->dbc_base + RSPHP_OFF); 1622 1623 /* elements might have been put in the queue while we were processing */ 1624 goto read_fifo; 1625 1626 normal_out: 1627 if (likely(!datapath_polling)) 1628 enable_irq(irq); 1629 else 1630 schedule_work(&dbc->poll_work); 1631 /* checking the fifo and enabling irqs is a race, missed event check */ 1632 tail = readl(dbc->dbc_base + RSPTP_OFF); 1633 if (tail != U32_MAX && head != tail) { 1634 if (likely(!datapath_polling)) 1635 disable_irq_nosync(irq); 1636 goto read_fifo; 1637 } 1638 srcu_read_unlock(&dbc->ch_lock, rcu_id); 1639 return IRQ_HANDLED; 1640 1641 error_out: 1642 srcu_read_unlock(&dbc->ch_lock, rcu_id); 1643 if (likely(!datapath_polling)) 1644 enable_irq(irq); 1645 else 1646 schedule_work(&dbc->poll_work); 1647 1648 return IRQ_HANDLED; 1649 } 1650 1651 int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) 1652 { 1653 struct qaic_wait *args = data; 1654 int usr_rcu_id, qdev_rcu_id; 1655 struct dma_bridge_chan *dbc; 1656 struct drm_gem_object *obj; 1657 struct qaic_device *qdev; 1658 unsigned long timeout; 1659 struct qaic_user *usr; 1660 struct qaic_bo *bo; 1661 int rcu_id; 1662 int ret; 1663 1664 if (args->pad != 0) 1665 return -EINVAL; 1666 1667 usr = file_priv->driver_priv; 1668 usr_rcu_id = srcu_read_lock(&usr->qddev_lock); 1669 if (!usr->qddev) { 1670 ret = -ENODEV; 1671 goto unlock_usr_srcu; 1672 } 1673 1674 qdev = usr->qddev->qdev; 1675 qdev_rcu_id = srcu_read_lock(&qdev->dev_lock); 1676 if (qdev->in_reset) { 1677 ret = -ENODEV; 1678 goto unlock_dev_srcu; 1679 } 1680 1681 if (args->dbc_id >= qdev->num_dbc) { 1682 ret = -EINVAL; 1683 goto unlock_dev_srcu; 1684 } 1685 1686 dbc = &qdev->dbc[args->dbc_id]; 1687 1688 rcu_id = srcu_read_lock(&dbc->ch_lock); 1689 if (dbc->usr != usr) { 1690 ret = -EPERM; 1691 goto unlock_ch_srcu; 1692 } 1693 1694 obj = drm_gem_object_lookup(file_priv, args->handle); 1695 if (!obj) { 1696 ret = -ENOENT; 1697 goto unlock_ch_srcu; 1698 } 1699 1700 bo = to_qaic_bo(obj); 1701 timeout = args->timeout ? args->timeout : wait_exec_default_timeout_ms; 1702 timeout = msecs_to_jiffies(timeout); 1703 ret = wait_for_completion_interruptible_timeout(&bo->xfer_done, timeout); 1704 if (!ret) { 1705 ret = -ETIMEDOUT; 1706 goto put_obj; 1707 } 1708 if (ret > 0) 1709 ret = 0; 1710 1711 if (!dbc->usr) 1712 ret = -EPERM; 1713 1714 put_obj: 1715 drm_gem_object_put(obj); 1716 unlock_ch_srcu: 1717 srcu_read_unlock(&dbc->ch_lock, rcu_id); 1718 unlock_dev_srcu: 1719 srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id); 1720 unlock_usr_srcu: 1721 srcu_read_unlock(&usr->qddev_lock, usr_rcu_id); 1722 return ret; 1723 } 1724 1725 int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) 1726 { 1727 struct qaic_perf_stats_entry *ent = NULL; 1728 struct qaic_perf_stats *args = data; 1729 int usr_rcu_id, qdev_rcu_id; 1730 struct drm_gem_object *obj; 1731 struct qaic_device *qdev; 1732 struct qaic_user *usr; 1733 struct qaic_bo *bo; 1734 int ret, i; 1735 1736 usr = file_priv->driver_priv; 1737 usr_rcu_id = srcu_read_lock(&usr->qddev_lock); 1738 if (!usr->qddev) { 1739 ret = -ENODEV; 1740 goto unlock_usr_srcu; 1741 } 1742 1743 qdev = usr->qddev->qdev; 1744 qdev_rcu_id = srcu_read_lock(&qdev->dev_lock); 1745 if (qdev->in_reset) { 1746 ret = -ENODEV; 1747 goto unlock_dev_srcu; 1748 } 1749 1750 if (args->hdr.dbc_id >= qdev->num_dbc) { 1751 ret = -EINVAL; 1752 goto unlock_dev_srcu; 1753 } 1754 1755 ent = kcalloc(args->hdr.count, sizeof(*ent), GFP_KERNEL); 1756 if (!ent) { 1757 ret = -EINVAL; 1758 goto unlock_dev_srcu; 1759 } 1760 1761 ret = copy_from_user(ent, u64_to_user_ptr(args->data), args->hdr.count * sizeof(*ent)); 1762 if (ret) { 1763 ret = -EFAULT; 1764 goto free_ent; 1765 } 1766 1767 for (i = 0; i < args->hdr.count; i++) { 1768 obj = drm_gem_object_lookup(file_priv, ent[i].handle); 1769 if (!obj) { 1770 ret = -ENOENT; 1771 goto free_ent; 1772 } 1773 bo = to_qaic_bo(obj); 1774 /* 1775 * perf stats ioctl is called before wait ioctl is complete then 1776 * the latency information is invalid. 1777 */ 1778 if (bo->perf_stats.req_processed_ts < bo->perf_stats.req_submit_ts) { 1779 ent[i].device_latency_us = 0; 1780 } else { 1781 ent[i].device_latency_us = div_u64((bo->perf_stats.req_processed_ts - 1782 bo->perf_stats.req_submit_ts), 1000); 1783 } 1784 ent[i].submit_latency_us = div_u64((bo->perf_stats.req_submit_ts - 1785 bo->perf_stats.req_received_ts), 1000); 1786 ent[i].queue_level_before = bo->perf_stats.queue_level_before; 1787 ent[i].num_queue_element = bo->total_slice_nents; 1788 drm_gem_object_put(obj); 1789 } 1790 1791 if (copy_to_user(u64_to_user_ptr(args->data), ent, args->hdr.count * sizeof(*ent))) 1792 ret = -EFAULT; 1793 1794 free_ent: 1795 kfree(ent); 1796 unlock_dev_srcu: 1797 srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id); 1798 unlock_usr_srcu: 1799 srcu_read_unlock(&usr->qddev_lock, usr_rcu_id); 1800 return ret; 1801 } 1802 1803 static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *dbc) 1804 { 1805 unsigned long flags; 1806 struct qaic_bo *bo; 1807 1808 spin_lock_irqsave(&dbc->xfer_lock, flags); 1809 while (!list_empty(&dbc->xfer_list)) { 1810 bo = list_first_entry(&dbc->xfer_list, typeof(*bo), xfer_list); 1811 bo->queued = false; 1812 list_del(&bo->xfer_list); 1813 spin_unlock_irqrestore(&dbc->xfer_lock, flags); 1814 dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir); 1815 complete_all(&bo->xfer_done); 1816 drm_gem_object_put(&bo->base); 1817 spin_lock_irqsave(&dbc->xfer_lock, flags); 1818 } 1819 spin_unlock_irqrestore(&dbc->xfer_lock, flags); 1820 } 1821 1822 int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr) 1823 { 1824 if (!qdev->dbc[dbc_id].usr || qdev->dbc[dbc_id].usr->handle != usr->handle) 1825 return -EPERM; 1826 1827 qdev->dbc[dbc_id].usr = NULL; 1828 synchronize_srcu(&qdev->dbc[dbc_id].ch_lock); 1829 return 0; 1830 } 1831 1832 /** 1833 * enable_dbc - Enable the DBC. DBCs are disabled by removing the context of 1834 * user. Add user context back to DBC to enable it. This function trusts the 1835 * DBC ID passed and expects the DBC to be disabled. 1836 * @qdev: Qranium device handle 1837 * @dbc_id: ID of the DBC 1838 * @usr: User context 1839 */ 1840 void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr) 1841 { 1842 qdev->dbc[dbc_id].usr = usr; 1843 } 1844 1845 void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id) 1846 { 1847 struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id]; 1848 1849 dbc->usr = NULL; 1850 empty_xfer_list(qdev, dbc); 1851 synchronize_srcu(&dbc->ch_lock); 1852 /* 1853 * Threads holding channel lock, may add more elements in the xfer_list. 1854 * Flush out these elements from xfer_list. 1855 */ 1856 empty_xfer_list(qdev, dbc); 1857 } 1858 1859 void release_dbc(struct qaic_device *qdev, u32 dbc_id) 1860 { 1861 struct bo_slice *slice, *slice_temp; 1862 struct qaic_bo *bo, *bo_temp; 1863 struct dma_bridge_chan *dbc; 1864 1865 dbc = &qdev->dbc[dbc_id]; 1866 if (!dbc->in_use) 1867 return; 1868 1869 wakeup_dbc(qdev, dbc_id); 1870 1871 dma_free_coherent(&qdev->pdev->dev, dbc->total_size, dbc->req_q_base, dbc->dma_addr); 1872 dbc->total_size = 0; 1873 dbc->req_q_base = NULL; 1874 dbc->dma_addr = 0; 1875 dbc->nelem = 0; 1876 dbc->usr = NULL; 1877 1878 list_for_each_entry_safe(bo, bo_temp, &dbc->bo_lists, bo_list) { 1879 list_for_each_entry_safe(slice, slice_temp, &bo->slices, slice) 1880 kref_put(&slice->ref_count, free_slice); 1881 bo->sliced = false; 1882 INIT_LIST_HEAD(&bo->slices); 1883 bo->total_slice_nents = 0; 1884 bo->dir = 0; 1885 bo->dbc = NULL; 1886 bo->nr_slice = 0; 1887 bo->nr_slice_xfer_done = 0; 1888 bo->queued = false; 1889 bo->req_id = 0; 1890 init_completion(&bo->xfer_done); 1891 complete_all(&bo->xfer_done); 1892 list_del(&bo->bo_list); 1893 bo->perf_stats.req_received_ts = 0; 1894 bo->perf_stats.req_submit_ts = 0; 1895 bo->perf_stats.req_processed_ts = 0; 1896 bo->perf_stats.queue_level_before = 0; 1897 } 1898 1899 dbc->in_use = false; 1900 wake_up(&dbc->dbc_release); 1901 } 1902