// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2021 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"

#include <linux/uaccess.h>
#include <linux/slab.h>

#define HL_CS_FLAGS_TYPE_MASK	(HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
		HL_CS_FLAGS_COLLECTIVE_WAIT | HL_CS_FLAGS_RESERVE_SIGNALS_ONLY | \
		HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY | HL_CS_FLAGS_ENGINE_CORE_COMMAND | \
		HL_CS_FLAGS_ENGINES_COMMAND | HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)


#define MAX_TS_ITER_NUM 100

/**
 * enum hl_cs_wait_status - cs wait status
 * @CS_WAIT_STATUS_BUSY: cs was not completed yet
 * @CS_WAIT_STATUS_COMPLETED: cs completed
 * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
 */
enum hl_cs_wait_status {
	CS_WAIT_STATUS_BUSY,
	CS_WAIT_STATUS_COMPLETED,
	CS_WAIT_STATUS_GONE
};

static void job_wq_completion(struct work_struct *work);
static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq,
				enum hl_cs_wait_status *status, s64 *timestamp);
static void cs_do_release(struct kref *ref);

static void hl_push_cs_outcome(struct hl_device *hdev,
			       struct hl_cs_outcome_store *outcome_store,
			       u64 seq, ktime_t ts, int error)
{
	struct hl_cs_outcome *node;
	unsigned long flags;

	/*
	 * CS outcome store supports the following operations:
	 * push outcome - store a recent CS outcome in the store
	 * pop outcome - retrieve a SPECIFIC (by seq) CS outcome from the store
	 * It uses 2 lists: used list and free list.
	 * It has a pre-allocated amount of nodes, each node stores
	 * a single CS outcome.
	 * Initially, all the nodes are in the free list.
	 * On push outcome, a node (any) is taken from the free list, its
	 * information is filled in, and the node is moved to the used list.
	 * It is possible, that there are no nodes left in the free list.
	 * In this case, we will lose some information about old outcomes. We
	 * will pop the OLDEST node from the used list, and make it free.
	 * On pop, the node is searched for in the used list (using a search
	 * index).
	 * If found, the node is then removed from the used list, and moved
	 * back to the free list. The outcome data that the node contained is
	 * returned back to the user.
	 */

	spin_lock_irqsave(&outcome_store->db_lock, flags);

	if (list_empty(&outcome_store->free_list)) {
		node = list_last_entry(&outcome_store->used_list,
					struct hl_cs_outcome, list_link);
		hash_del(&node->map_link);
		dev_dbg(hdev->dev, "CS %llu outcome was lost\n", node->seq);
	} else {
		node = list_last_entry(&outcome_store->free_list,
					struct hl_cs_outcome, list_link);
	}

	list_del_init(&node->list_link);

	node->seq = seq;
	node->ts = ts;
	node->error = error;

	list_add(&node->list_link, &outcome_store->used_list);
	hash_add(outcome_store->outcome_map, &node->map_link, node->seq);

	spin_unlock_irqrestore(&outcome_store->db_lock, flags);
}
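/*
 * hl_pop_cs_outcome - retrieve and remove the outcome of a specific CS.
 *
 * @outcome_store: the outcome store to search
 * @seq: sequence number of the CS whose outcome is requested
 * @ts: filled with the stored completion timestamp on success
 * @error: filled with the stored error code on success
 *
 * Looks up @seq in the outcome map; if found, moves the node back to the
 * free list and returns true, otherwise returns false.
 */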
static bool hl_pop_cs_outcome(struct hl_cs_outcome_store *outcome_store,
				u64 seq, ktime_t *ts, int *error)
{
	struct hl_cs_outcome *node;
	unsigned long flags;

	spin_lock_irqsave(&outcome_store->db_lock, flags);

	hash_for_each_possible(outcome_store->outcome_map, node, map_link, seq)
		if (node->seq == seq) {
			*ts = node->ts;
			*error = node->error;

			hash_del(&node->map_link);
			list_del_init(&node->list_link);
			list_add(&node->list_link, &outcome_store->free_list);

			spin_unlock_irqrestore(&outcome_store->db_lock, flags);

			return true;
		}

	spin_unlock_irqrestore(&outcome_store->db_lock, flags);

	return false;
}

static void hl_sob_reset(struct kref *ref)
{
	struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
							kref);
	struct hl_device *hdev = hw_sob->hdev;

	dev_dbg(hdev->dev, "reset sob id %u\n", hw_sob->sob_id);

	hdev->asic_funcs->reset_sob(hdev, hw_sob);

	hw_sob->need_reset = false;
}

void hl_sob_reset_error(struct kref *ref)
{
	struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
							kref);
	struct hl_device *hdev = hw_sob->hdev;

	dev_crit(hdev->dev,
		"SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
		hw_sob->q_idx, hw_sob->sob_id);
}

void hw_sob_put(struct hl_hw_sob *hw_sob)
{
	if (hw_sob)
		kref_put(&hw_sob->kref, hl_sob_reset);
}

static void hw_sob_put_err(struct hl_hw_sob *hw_sob)
{
	if (hw_sob)
		kref_put(&hw_sob->kref, hl_sob_reset_error);
}

void hw_sob_get(struct hl_hw_sob *hw_sob)
{
	if (hw_sob)
		kref_get(&hw_sob->kref);
}

/**
 * hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet
 * @sob_base: sob base id
 * @sob_mask: sob user mask, each bit represents a sob offset from sob base
 * @mask: generated mask
 *
 * Return: 0 if given parameters are valid
 */
int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
{
	int i;

	if (sob_mask == 0)
		return -EINVAL;

	if (sob_mask == 0x1) {
		*mask = ~(1 << (sob_base & 0x7));
	} else {
		/* find msb in order to verify sob range is valid */
		for (i = BITS_PER_BYTE - 1 ; i >= 0 ; i--)
			if (BIT(i) & sob_mask)
				break;

		if (i > (HL_MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1))
			return -EINVAL;

		*mask = ~sob_mask;
	}

	return 0;
}

static void hl_fence_release(struct kref *kref)
{
	struct hl_fence *fence =
		container_of(kref, struct hl_fence, refcount);
	struct hl_cs_compl *hl_cs_cmpl =
		container_of(fence, struct hl_cs_compl, base_fence);

	kfree(hl_cs_cmpl);
}

void hl_fence_put(struct hl_fence *fence)
{
	if (IS_ERR_OR_NULL(fence))
		return;
	kref_put(&fence->refcount, hl_fence_release);
}
void hl_fences_put(struct hl_fence **fence, int len)
{
	int i;

	for (i = 0; i < len; i++, fence++)
		hl_fence_put(*fence);
}

void hl_fence_get(struct hl_fence *fence)
{
	if (fence)
		kref_get(&fence->refcount);
}

static void hl_fence_init(struct hl_fence *fence, u64 sequence)
{
	kref_init(&fence->refcount);
	fence->cs_sequence = sequence;
	fence->error = 0;
	fence->timestamp = ktime_set(0, 0);
	fence->mcs_handling_done = false;
	init_completion(&fence->completion);
}

void cs_get(struct hl_cs *cs)
{
	kref_get(&cs->refcount);
}

static int cs_get_unless_zero(struct hl_cs *cs)
{
	return kref_get_unless_zero(&cs->refcount);
}

static void cs_put(struct hl_cs *cs)
{
	kref_put(&cs->refcount, cs_do_release);
}

static void cs_job_do_release(struct kref *ref)
{
	struct hl_cs_job *job = container_of(ref, struct hl_cs_job, refcount);

	kfree(job);
}

static void hl_cs_job_put(struct hl_cs_job *job)
{
	kref_put(&job->refcount, cs_job_do_release);
}

bool cs_needs_completion(struct hl_cs *cs)
{
	/* In case this is a staged CS, only the last CS in sequence should
	 * get a completion, any non staged CS will always get a completion
	 */
	if (cs->staged_cs && !cs->staged_last)
		return false;

	return true;
}

bool cs_needs_timeout(struct hl_cs *cs)
{
	/* In case this is a staged CS, only the first CS in sequence should
	 * get a timeout, any non staged CS will always get a timeout
	 */
	if (cs->staged_cs && !cs->staged_first)
		return false;

	return true;
}

static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
{
	/*
	 * Patched CB is created for external queues jobs, and for H/W queues
	 * jobs if the user CB was allocated by driver and MMU is disabled.
	 */
	return (job->queue_type == QUEUE_TYPE_EXT ||
			(job->queue_type == QUEUE_TYPE_HW &&
					job->is_kernel_allocated_cb &&
					!hdev->mmu_enable));
}

/*
 * cs_parser - parse the user command submission
 *
 * @hpriv	: pointer to the private data of the fd
 * @job	: pointer to the job that holds the command submission info
 *
 * The function parses the command submission of the user. It calls the
 * ASIC specific parser, which returns a list of memory blocks to send
 * to the device as different command buffers
 *
 */
static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_parser parser;
	int rc;

	parser.ctx_id = job->cs->ctx->asid;
	parser.cs_sequence = job->cs->sequence;
	parser.job_id = job->id;

	parser.hw_queue_id = job->hw_queue_id;
	parser.job_userptr_list = &job->userptr_list;
	parser.patched_cb = NULL;
	parser.user_cb = job->user_cb;
	parser.user_cb_size = job->user_cb_size;
	parser.queue_type = job->queue_type;
	parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
	job->patched_cb = NULL;
	parser.completion = cs_needs_completion(job->cs);

	rc = hdev->asic_funcs->cs_parser(hdev, &parser);

	if (is_cb_patched(hdev, job)) {
		if (!rc) {
			job->patched_cb = parser.patched_cb;
			job->job_cb_size = parser.patched_cb_size;
			job->contains_dma_pkt = parser.contains_dma_pkt;
			atomic_inc(&job->patched_cb->cs_cnt);
		}

		/*
		 * Whether the parsing worked or not, we don't need the
		 * original CB anymore because it was already parsed and
		 * won't be accessed again for this CS
		 */
		atomic_dec(&job->user_cb->cs_cnt);
		hl_cb_put(job->user_cb);
		job->user_cb = NULL;
	} else if (!rc) {
		job->job_cb_size = job->user_cb_size;
	}

	return rc;
}

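/*
 * hl_complete_job - free a single job after it has completed or was rolled
 *                   back, and drop the references it holds.
 *
 * @hdev: pointer to device structure
 * @job: the job to complete
 *
 * Releases the patched/user CBs as needed, removes the job from the CS job
 * list and, for jobs that get a completion, puts the CS reference that was
 * taken for them when the CS was scheduled.
 */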
static void hl_complete_job(struct hl_device *hdev, struct hl_cs_job *job)
{
	struct hl_cs *cs = job->cs;

	if (is_cb_patched(hdev, job)) {
		hl_userptr_delete_list(hdev, &job->userptr_list);

		/*
		 * We might arrive here from rollback and patched CB wasn't
		 * created, so we need to check it's not NULL
		 */
		if (job->patched_cb) {
			atomic_dec(&job->patched_cb->cs_cnt);
			hl_cb_put(job->patched_cb);
		}
	}

	/* For H/W queue jobs, if a user CB was allocated by driver and MMU is
	 * enabled, the user CB isn't released in cs_parser() and thus should be
	 * released here. This is also true for INT queues jobs which were
	 * allocated by driver.
	 */
	if ((job->is_kernel_allocated_cb &&
		((job->queue_type == QUEUE_TYPE_HW && hdev->mmu_enable) ||
				job->queue_type == QUEUE_TYPE_INT))) {
		atomic_dec(&job->user_cb->cs_cnt);
		hl_cb_put(job->user_cb);
	}

	/*
	 * This is the only place where there can be multiple threads
	 * modifying the list at the same time
	 */
	spin_lock(&cs->job_lock);
	list_del(&job->cs_node);
	spin_unlock(&cs->job_lock);

	hl_debugfs_remove_job(hdev, job);

	/* We decrement reference only for a CS that gets completion
	 * because the reference was incremented only for this kind of CS
	 * right before it was scheduled.
	 *
	 * In staged submission, only the last CS marked as 'staged_last'
	 * gets completion, hence its release function will be called from here.
	 * As for all the rest CS's in the staged submission which do not get
	 * completion, their CS reference will be decremented by the
	 * 'staged_last' CS during the CS release flow.
	 * All relevant PQ CI counters will be incremented during the CS release
	 * flow by calling 'hl_hw_queue_update_ci'.
	 */
	if (cs_needs_completion(cs) &&
			(job->queue_type == QUEUE_TYPE_EXT || job->queue_type == QUEUE_TYPE_HW)) {

		/* In CS based completions, the timestamp is already available,
		 * so no need to extract it from job
		 */
		if (hdev->asic_prop.completion_mode == HL_COMPLETION_MODE_JOB)
			cs->completion_timestamp = job->timestamp;

		cs_put(cs);
	}

	hl_cs_job_put(job);
}

/*
 * hl_staged_cs_find_first - locate the first CS in this staged submission
 *
 * @hdev: pointer to device structure
 * @cs_seq: staged submission sequence number
 *
 * @note: This function must be called under 'hdev->cs_mirror_lock'
 *
 * Find and return a CS pointer with the given sequence
 */
struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq)
{
	struct hl_cs *cs;

	list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node)
		if (cs->staged_cs && cs->staged_first &&
				cs->sequence == cs_seq)
			return cs;

	return NULL;
}

/*
 * is_staged_cs_last_exists - returns true if the last CS in sequence exists
 *
 * @hdev: pointer to device structure
 * @cs: staged submission member
 *
 */
bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs)
{
	struct hl_cs *last_entry;

	last_entry = list_last_entry(&cs->staged_cs_node, struct hl_cs,
								staged_cs_node);

	if (last_entry->staged_last)
		return true;

	return false;
}

/*
 * staged_cs_get - get CS reference if this CS is a part of a staged CS
 *
 * @hdev: pointer to device structure
 * @cs: current CS
 * @cs_seq: staged submission sequence number
 *
 * Increment CS reference for every CS in this staged submission except for
 * the CS which gets completion.
 */
static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs)
{
	/* Only the last CS in this staged submission will get a completion.
	 * We must increment the reference for all other CS's in this
	 * staged submission.
	 * Once we get a completion we will release the whole staged submission.
	 */
	if (!cs->staged_last)
		cs_get(cs);
}

/*
 * staged_cs_put - put a CS in case it is part of staged submission
 *
 * @hdev: pointer to device structure
 * @cs: CS to put
 *
 * This function decrements a CS reference (for a non completion CS)
 */
static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
{
	/* We release all CS's in a staged submission except the last
	 * CS which we have never incremented its reference.
	 */
	if (!cs_needs_completion(cs))
		cs_put(cs);
}

static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
{
	struct hl_cs *next = NULL, *iter, *first_cs;

	if (!cs_needs_timeout(cs))
		return;

	spin_lock(&hdev->cs_mirror_lock);

	/* We need to handle tdr only once for the complete staged submission.
	 * Hence, we choose the CS that reaches this function first which is
	 * the CS marked as 'staged_last'.
	 * In case single staged cs was submitted which has both first and last
	 * indications, then "cs_find_first" below will return NULL, since we
	 * removed the cs node from the list before getting here,
	 * in such cases just continue with the cs to cancel its TDR work.
	 */
	if (cs->staged_cs && cs->staged_last) {
		first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
		if (first_cs)
			cs = first_cs;
	}

	spin_unlock(&hdev->cs_mirror_lock);

	/* Don't cancel TDR in case this CS was timedout because we might be
	 * running from the TDR context
	 */
	if (cs->timedout || hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT)
		return;

	if (cs->tdr_active)
		cancel_delayed_work_sync(&cs->work_tdr);

	spin_lock(&hdev->cs_mirror_lock);

	/* queue TDR for next CS */
	list_for_each_entry(iter, &hdev->cs_mirror_list, mirror_node)
		if (cs_needs_timeout(iter)) {
			next = iter;
			break;
		}

	if (next && !next->tdr_active) {
		next->tdr_active = true;
		schedule_delayed_work(&next->work_tdr, next->timeout_jiffies);
	}

	spin_unlock(&hdev->cs_mirror_lock);
}

/*
 * force_complete_multi_cs - complete all contexts that wait on multi-CS
 *
 * @hdev: pointer to habanalabs device structure
 */
static void force_complete_multi_cs(struct hl_device *hdev)
{
	int i;

	for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
		struct multi_cs_completion *mcs_compl;

		mcs_compl = &hdev->multi_cs_completion[i];

		spin_lock(&mcs_compl->lock);

		if (!mcs_compl->used) {
			spin_unlock(&mcs_compl->lock);
			continue;
		}

		/* when calling force complete no context should be waiting on
		 * multi-CS.
		 * We are calling the function as a protection for such case
		 * to free any pending context and print error message
		 */
		dev_err(hdev->dev,
				"multi-CS completion context %d still waiting when calling force completion\n",
				i);
		complete_all(&mcs_compl->completion);
		spin_unlock(&mcs_compl->lock);
	}
}

/*
 * complete_multi_cs - complete all waiting entities on multi-CS
 *
 * @hdev: pointer to habanalabs device structure
 * @cs: CS structure
 * The function signals a waiting entity that has overlapping stream masters
 * with the completed CS.
 * For example:
 * - a completed CS worked on stream master QID 4, multi CS completion
 *   is actively waiting on stream master QIDs 3, 5. don't send signal as no
 *   common stream master QID
 * - a completed CS worked on stream master QID 4, multi CS completion
 *   is actively waiting on stream master QIDs 3, 4. send signal as stream
 *   master QID 4 is common
 */
static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs)
{
	struct hl_fence *fence = cs->fence;
	int i;

	/* in case of multi CS check for completion only for the first CS */
	if (cs->staged_cs && !cs->staged_first)
		return;

	for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
		struct multi_cs_completion *mcs_compl;

		mcs_compl = &hdev->multi_cs_completion[i];
		if (!mcs_compl->used)
			continue;

		spin_lock(&mcs_compl->lock);

		/*
		 * complete if:
		 * 1. still waiting for completion
		 * 2. the completed CS has at least one overlapping stream
		 *    master with the stream masters in the completion
		 */
		if (mcs_compl->used &&
				(fence->stream_master_qid_map &
					mcs_compl->stream_master_qid_map)) {
			/* extract the timestamp only of first completed CS */
			if (!mcs_compl->timestamp)
				mcs_compl->timestamp = ktime_to_ns(fence->timestamp);

			complete_all(&mcs_compl->completion);

			/*
			 * Setting mcs_handling_done inside the lock ensures
			 * at least one fence has mcs_handling_done set to
			 * true before wait for mcs finish. This ensures at
			 * least one CS will be set as completed when polling
			 * mcs fences.
			 */
			fence->mcs_handling_done = true;
		}

		spin_unlock(&mcs_compl->lock);
	}
	/* In case CS completed without mcs completion initialized */
	fence->mcs_handling_done = true;
}

static inline void cs_release_sob_reset_handler(struct hl_device *hdev,
					struct hl_cs *cs,
					struct hl_cs_compl *hl_cs_cmpl)
{
	/* Skip this handler if the cs wasn't submitted, to avoid putting
	 * the hw_sob twice, since this case was already handled at this point,
	 * also skip if the hw_sob pointer wasn't set.
	 */
	if (!hl_cs_cmpl->hw_sob || !cs->submitted)
		return;

	spin_lock(&hl_cs_cmpl->lock);

	/*
	 * we get refcount upon reservation of signals or signal/wait cs for the
	 * hw_sob object, and need to put it when the first staged cs
	 * (which contains the encaps signals) or cs signal/wait is completed.
	 */
	if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
			(hl_cs_cmpl->type == CS_TYPE_WAIT) ||
			(hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT) ||
			(!!hl_cs_cmpl->encaps_signals)) {
		dev_dbg(hdev->dev,
				"CS 0x%llx type %d finished, sob_id: %d, sob_val: %u\n",
				hl_cs_cmpl->cs_seq,
				hl_cs_cmpl->type,
				hl_cs_cmpl->hw_sob->sob_id,
				hl_cs_cmpl->sob_val);

		hw_sob_put(hl_cs_cmpl->hw_sob);

		if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
			hdev->asic_funcs->reset_sob_group(hdev,
					hl_cs_cmpl->sob_group);
	}

	spin_unlock(&hl_cs_cmpl->lock);
}

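/*
 * cs_do_release - final release of a CS, called when its refcount drops to 0.
 *
 * @ref: kref embedded in the CS
 *
 * Completes any remaining jobs, updates queue CIs, removes the CS from the
 * mirror list, hands over the TDR to the next CS, marks the fence with the
 * proper error (timeout/abort/not-submitted), signals all waiters and frees
 * the CS object.
 */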
static void cs_do_release(struct kref *ref)
{
	struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
	struct hl_device *hdev = cs->ctx->hdev;
	struct hl_cs_job *job, *tmp;
	struct hl_cs_compl *hl_cs_cmpl =
			container_of(cs->fence, struct hl_cs_compl, base_fence);

	cs->completed = true;

	/*
	 * Although if we reached here it means that all external jobs have
	 * finished, because each one of them took refcnt to CS, we still
	 * need to go over the internal jobs and complete them. Otherwise, we
	 * will have leaked memory and what's worse, the CS object (and
	 * potentially the CTX object) could be released, while the JOB
	 * still holds a pointer to them (but no reference).
	 */
	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		hl_complete_job(hdev, job);

	if (!cs->submitted) {
		/*
		 * In case the wait for signal CS was submitted, the fence put
		 * occurs in init_signal_wait_cs() or collective_wait_init_cs()
		 * right before hanging on the PQ.
		 */
		if (cs->type == CS_TYPE_WAIT ||
				cs->type == CS_TYPE_COLLECTIVE_WAIT)
			hl_fence_put(cs->signal_fence);

		goto out;
	}

	/* Need to update CI for all queue jobs that do not get completion */
	hl_hw_queue_update_ci(cs);

	/* remove CS from CS mirror list */
	spin_lock(&hdev->cs_mirror_lock);
	list_del_init(&cs->mirror_node);
	spin_unlock(&hdev->cs_mirror_lock);

	cs_handle_tdr(hdev, cs);

	if (cs->staged_cs) {
		/* the completion CS decrements reference for the entire
		 * staged submission
		 */
		if (cs->staged_last) {
			struct hl_cs *staged_cs, *tmp_cs;

			list_for_each_entry_safe(staged_cs, tmp_cs,
					&cs->staged_cs_node, staged_cs_node)
				staged_cs_put(hdev, staged_cs);
		}

		/* A staged CS will be a member in the list only after it
		 * was submitted. We used 'cs_mirror_lock' when inserting
		 * it to list so we will use it again when removing it
		 */
		if (cs->submitted) {
			spin_lock(&hdev->cs_mirror_lock);
			list_del(&cs->staged_cs_node);
			spin_unlock(&hdev->cs_mirror_lock);
		}

		/* decrement refcount to handle when first staged cs
		 * with encaps signals is completed.
		 */
		if (hl_cs_cmpl->encaps_signals)
			kref_put(&hl_cs_cmpl->encaps_sig_hdl->refcount,
					hl_encaps_release_handle_and_put_ctx);
	}

	if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT) && cs->encaps_signals)
		kref_put(&cs->encaps_sig_hdl->refcount, hl_encaps_release_handle_and_put_ctx);

out:
	/* Must be called before hl_ctx_put because inside we use ctx to get
	 * the device
	 */
	hl_debugfs_remove_cs(cs);

	hdev->shadow_cs_queue[cs->sequence & (hdev->asic_prop.max_pending_cs - 1)] = NULL;

	/* We need to mark an error for not submitted because in that case
	 * the hl fence release flow is different. Mainly, we don't need
	 * to handle hw_sob for signal/wait
	 */
	if (cs->timedout)
		cs->fence->error = -ETIMEDOUT;
	else if (cs->aborted)
		cs->fence->error = -EIO;
	else if (!cs->submitted)
		cs->fence->error = -EBUSY;

	if (unlikely(cs->skip_reset_on_timeout)) {
		dev_err(hdev->dev,
			"Command submission %llu completed after %llu (s)\n",
			cs->sequence,
			div_u64(jiffies - cs->submission_time_jiffies, HZ));
	}

	if (cs->timestamp) {
		cs->fence->timestamp = cs->completion_timestamp;
		hl_push_cs_outcome(hdev, &cs->ctx->outcome_store, cs->sequence,
				   cs->fence->timestamp, cs->fence->error);
	}

	hl_ctx_put(cs->ctx);

	complete_all(&cs->fence->completion);
	complete_multi_cs(hdev, cs);

	cs_release_sob_reset_handler(hdev, cs, hl_cs_cmpl);

	hl_fence_put(cs->fence);

	kfree(cs->jobs_in_queue_cnt);
	kfree(cs);
}

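/*
 * cs_timedout - TDR (timeout detection and recovery) work for a single CS.
 *
 * @work: the delayed work embedded in the CS
 *
 * Marks the CS as timed out (unless skip_reset_on_timeout is set), records
 * the first timeout parameters for error capture, prints a message according
 * to the CS type, dumps the device state and finally triggers a device reset
 * or a user-space notification as configured.
 */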
Mainly, we don't need 770 * to handle hw_sob for signal/wait 771 */ 772 if (cs->timedout) 773 cs->fence->error = -ETIMEDOUT; 774 else if (cs->aborted) 775 cs->fence->error = -EIO; 776 else if (!cs->submitted) 777 cs->fence->error = -EBUSY; 778 779 if (unlikely(cs->skip_reset_on_timeout)) { 780 dev_err(hdev->dev, 781 "Command submission %llu completed after %llu (s)\n", 782 cs->sequence, 783 div_u64(jiffies - cs->submission_time_jiffies, HZ)); 784 } 785 786 if (cs->timestamp) { 787 cs->fence->timestamp = cs->completion_timestamp; 788 hl_push_cs_outcome(hdev, &cs->ctx->outcome_store, cs->sequence, 789 cs->fence->timestamp, cs->fence->error); 790 } 791 792 hl_ctx_put(cs->ctx); 793 794 complete_all(&cs->fence->completion); 795 complete_multi_cs(hdev, cs); 796 797 cs_release_sob_reset_handler(hdev, cs, hl_cs_cmpl); 798 799 hl_fence_put(cs->fence); 800 801 kfree(cs->jobs_in_queue_cnt); 802 kfree(cs); 803 } 804 805 static void cs_timedout(struct work_struct *work) 806 { 807 struct hl_device *hdev; 808 u64 event_mask = 0x0; 809 int rc; 810 struct hl_cs *cs = container_of(work, struct hl_cs, 811 work_tdr.work); 812 bool skip_reset_on_timeout = cs->skip_reset_on_timeout, device_reset = false; 813 814 rc = cs_get_unless_zero(cs); 815 if (!rc) 816 return; 817 818 if ((!cs->submitted) || (cs->completed)) { 819 cs_put(cs); 820 return; 821 } 822 823 hdev = cs->ctx->hdev; 824 825 if (likely(!skip_reset_on_timeout)) { 826 if (hdev->reset_on_lockup) 827 device_reset = true; 828 else 829 hdev->reset_info.needs_reset = true; 830 831 /* Mark the CS is timed out so we won't try to cancel its TDR */ 832 cs->timedout = true; 833 } 834 835 /* Save only the first CS timeout parameters */ 836 rc = atomic_cmpxchg(&hdev->captured_err_info.cs_timeout.write_enable, 1, 0); 837 if (rc) { 838 hdev->captured_err_info.cs_timeout.timestamp = ktime_get(); 839 hdev->captured_err_info.cs_timeout.seq = cs->sequence; 840 event_mask |= HL_NOTIFIER_EVENT_CS_TIMEOUT; 841 } 842 843 switch (cs->type) { 844 case CS_TYPE_SIGNAL: 845 dev_err(hdev->dev, 846 "Signal command submission %llu has not finished in time!\n", 847 cs->sequence); 848 break; 849 850 case CS_TYPE_WAIT: 851 dev_err(hdev->dev, 852 "Wait command submission %llu has not finished in time!\n", 853 cs->sequence); 854 break; 855 856 case CS_TYPE_COLLECTIVE_WAIT: 857 dev_err(hdev->dev, 858 "Collective Wait command submission %llu has not finished in time!\n", 859 cs->sequence); 860 break; 861 862 default: 863 dev_err(hdev->dev, 864 "Command submission %llu has not finished in time!\n", 865 cs->sequence); 866 break; 867 } 868 869 rc = hl_state_dump(hdev); 870 if (rc) 871 dev_err(hdev->dev, "Error during system state dump %d\n", rc); 872 873 cs_put(cs); 874 875 if (device_reset) { 876 event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET; 877 hl_device_cond_reset(hdev, HL_DRV_RESET_TDR, event_mask); 878 } else if (event_mask) { 879 hl_notifier_event_send_all(hdev, event_mask); 880 } 881 } 882 883 static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx, 884 enum hl_cs_type cs_type, u64 user_sequence, 885 struct hl_cs **cs_new, u32 flags, u32 timeout) 886 { 887 struct hl_cs_counters_atomic *cntr; 888 struct hl_fence *other = NULL; 889 struct hl_cs_compl *cs_cmpl; 890 struct hl_cs *cs; 891 int rc; 892 893 cntr = &hdev->aggregated_cs_counters; 894 895 cs = kzalloc(sizeof(*cs), GFP_ATOMIC); 896 if (!cs) 897 cs = kzalloc(sizeof(*cs), GFP_KERNEL); 898 899 if (!cs) { 900 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); 901 atomic64_inc(&cntr->out_of_mem_drop_cnt); 902 return 
static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
			enum hl_cs_type cs_type, u64 user_sequence,
			struct hl_cs **cs_new, u32 flags, u32 timeout)
{
	struct hl_cs_counters_atomic *cntr;
	struct hl_fence *other = NULL;
	struct hl_cs_compl *cs_cmpl;
	struct hl_cs *cs;
	int rc;

	cntr = &hdev->aggregated_cs_counters;

	cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
	if (!cs)
		cs = kzalloc(sizeof(*cs), GFP_KERNEL);

	if (!cs) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		return -ENOMEM;
	}

	/* increment refcnt for context */
	hl_ctx_get(ctx);

	cs->ctx = ctx;
	cs->submitted = false;
	cs->completed = false;
	cs->type = cs_type;
	cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
	cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
	cs->timeout_jiffies = timeout;
	cs->skip_reset_on_timeout =
		hdev->reset_info.skip_reset_on_timeout ||
		!!(flags & HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT);
	cs->submission_time_jiffies = jiffies;
	INIT_LIST_HEAD(&cs->job_list);
	INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
	kref_init(&cs->refcount);
	spin_lock_init(&cs->job_lock);

	cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
	if (!cs_cmpl)
		cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_KERNEL);

	if (!cs_cmpl) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		rc = -ENOMEM;
		goto free_cs;
	}

	cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
			sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
	if (!cs->jobs_in_queue_cnt)
		cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
				sizeof(*cs->jobs_in_queue_cnt), GFP_KERNEL);

	if (!cs->jobs_in_queue_cnt) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		rc = -ENOMEM;
		goto free_cs_cmpl;
	}

	cs_cmpl->hdev = hdev;
	cs_cmpl->type = cs->type;
	spin_lock_init(&cs_cmpl->lock);
	cs->fence = &cs_cmpl->base_fence;

	spin_lock(&ctx->cs_lock);

	cs_cmpl->cs_seq = ctx->cs_sequence;
	other = ctx->cs_pending[cs_cmpl->cs_seq &
				(hdev->asic_prop.max_pending_cs - 1)];

	if (other && !completion_done(&other->completion)) {
		/* If the following statement is true, it means we have reached
		 * a point in which only part of the staged submission was
		 * submitted and we don't have enough room in the 'cs_pending'
		 * array for the rest of the submission.
		 * This causes a deadlock because this CS will never be
		 * completed as it depends on future CS's for completion.
		 */
		if (other->cs_sequence == user_sequence)
			dev_crit_ratelimited(hdev->dev,
				"Staged CS %llu deadlock due to lack of resources",
				user_sequence);

		dev_dbg_ratelimited(hdev->dev,
			"Rejecting CS because of too many in-flight CS\n");
		atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt);
		atomic64_inc(&cntr->max_cs_in_flight_drop_cnt);
		rc = -EAGAIN;
		goto free_fence;
	}

	/* init hl_fence */
	hl_fence_init(&cs_cmpl->base_fence, cs_cmpl->cs_seq);

	cs->sequence = cs_cmpl->cs_seq;

	ctx->cs_pending[cs_cmpl->cs_seq &
			(hdev->asic_prop.max_pending_cs - 1)] =
			&cs_cmpl->base_fence;
	ctx->cs_sequence++;

	hl_fence_get(&cs_cmpl->base_fence);

	hl_fence_put(other);

	spin_unlock(&ctx->cs_lock);

	*cs_new = cs;

	return 0;

free_fence:
	spin_unlock(&ctx->cs_lock);
	kfree(cs->jobs_in_queue_cnt);
free_cs_cmpl:
	kfree(cs_cmpl);
free_cs:
	kfree(cs);
	hl_ctx_put(ctx);
	return rc;
}

static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
{
	struct hl_cs_job *job, *tmp;

	staged_cs_put(hdev, cs);

	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		hl_complete_job(hdev, job);
}

/*
 * release_reserved_encaps_signals() - release reserved encapsulated signals.
 * @hdev: pointer to habanalabs device structure
 *
 * Release reserved encapsulated signals which weren't un-reserved, or for which a CS with
 * encapsulated signals wasn't submitted and thus weren't released as part of CS roll-back.
 * For these signals need also to put the refcount of the H/W SOB which was taken at the
 * reservation.
 */
static void release_reserved_encaps_signals(struct hl_device *hdev)
{
	struct hl_ctx *ctx = hl_get_compute_ctx(hdev);
	struct hl_cs_encaps_sig_handle *handle;
	struct hl_encaps_signals_mgr *mgr;
	u32 id;

	if (!ctx)
		return;

	mgr = &ctx->sig_mgr;

	idr_for_each_entry(&mgr->handles, handle, id)
		if (handle->cs_seq == ULLONG_MAX)
			kref_put(&handle->refcount, hl_encaps_release_handle_and_put_sob_ctx);

	hl_ctx_put(ctx);
}

void hl_cs_rollback_all(struct hl_device *hdev, bool skip_wq_flush)
{
	int i;
	struct hl_cs *cs, *tmp;

	if (!skip_wq_flush) {
		flush_workqueue(hdev->ts_free_obj_wq);

		/* flush all completions before iterating over the CS mirror list in
		 * order to avoid a race with the release functions
		 */
		for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
			flush_workqueue(hdev->cq_wq[i]);

		flush_workqueue(hdev->cs_cmplt_wq);
	}

	/* Make sure we don't have leftovers in the CS mirror list */
	list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
		cs_get(cs);
		cs->aborted = true;
		dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
					cs->ctx->asid, cs->sequence);
		cs_rollback(hdev, cs);
		cs_put(cs);
	}

	force_complete_multi_cs(hdev);

	release_reserved_encaps_signals(hdev);
}

static void
wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
{
	struct hl_user_pending_interrupt *pend, *temp;

	spin_lock(&interrupt->wait_list_lock);
	list_for_each_entry_safe(pend, temp, &interrupt->wait_list_head, wait_list_node) {
		if (pend->ts_reg_info.buf) {
			list_del(&pend->wait_list_node);
			hl_mmap_mem_buf_put(pend->ts_reg_info.buf);
			hl_cb_put(pend->ts_reg_info.cq_cb);
		} else {
			pend->fence.error = -EIO;
			complete_all(&pend->fence.completion);
		}
	}
	spin_unlock(&interrupt->wait_list_lock);
}

void hl_release_pending_user_interrupts(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_user_interrupt *interrupt;
	int i;

	if (!prop->user_interrupt_count)
		return;

	/* We iterate through the user interrupt requests and wake up all
	 * user threads waiting for interrupt completion. We iterate the
	 * list under a lock, this is why all user threads, once awake,
	 * will wait on the same lock and will release the waiting object upon
	 * unlock.
	 */

	for (i = 0 ; i < prop->user_interrupt_count ; i++) {
		interrupt = &hdev->user_interrupt[i];
		wake_pending_user_interrupt_threads(interrupt);
	}

	interrupt = &hdev->common_user_cq_interrupt;
	wake_pending_user_interrupt_threads(interrupt);

	interrupt = &hdev->common_decoder_interrupt;
	wake_pending_user_interrupt_threads(interrupt);
}

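/*
 * force_complete_cs - mark every CS on the mirror list as failed and wake
 *                     anyone waiting on its fence.
 *
 * @hdev: pointer to habanalabs device structure
 */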
static void force_complete_cs(struct hl_device *hdev)
{
	struct hl_cs *cs;

	spin_lock(&hdev->cs_mirror_lock);

	list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node) {
		cs->fence->error = -EIO;
		complete_all(&cs->fence->completion);
	}

	spin_unlock(&hdev->cs_mirror_lock);
}

void hl_abort_waitings_for_completion(struct hl_device *hdev)
{
	force_complete_cs(hdev);
	force_complete_multi_cs(hdev);
	hl_release_pending_user_interrupts(hdev);
}

static void job_wq_completion(struct work_struct *work)
{
	struct hl_cs_job *job = container_of(work, struct hl_cs_job,
						finish_work);
	struct hl_cs *cs = job->cs;
	struct hl_device *hdev = cs->ctx->hdev;

	/* job is no longer needed */
	hl_complete_job(hdev, job);
}

static void cs_completion(struct work_struct *work)
{
	struct hl_cs *cs = container_of(work, struct hl_cs, finish_work);
	struct hl_device *hdev = cs->ctx->hdev;
	struct hl_cs_job *job, *tmp;

	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		hl_complete_job(hdev, job);
}

u32 hl_get_active_cs_num(struct hl_device *hdev)
{
	u32 active_cs_num = 0;
	struct hl_cs *cs;

	spin_lock(&hdev->cs_mirror_lock);

	list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node)
		if (!cs->completed)
			active_cs_num++;

	spin_unlock(&hdev->cs_mirror_lock);

	return active_cs_num;
}

static int validate_queue_index(struct hl_device *hdev,
				struct hl_cs_chunk *chunk,
				enum hl_queue_type *queue_type,
				bool *is_kernel_allocated_cb)
{
	struct asic_fixed_properties *asic = &hdev->asic_prop;
	struct hw_queue_properties *hw_queue_prop;

	/* This must be checked here to prevent out-of-bounds access to
	 * hw_queues_props array
	 */
	if (chunk->queue_index >= asic->max_queues) {
		dev_err(hdev->dev, "Queue index %d is invalid\n",
			chunk->queue_index);
		return -EINVAL;
	}

	hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];

	if (hw_queue_prop->type == QUEUE_TYPE_NA) {
		dev_err(hdev->dev, "Queue index %d is not applicable\n",
			chunk->queue_index);
		return -EINVAL;
	}

	if (hw_queue_prop->binned) {
		dev_err(hdev->dev, "Queue index %d is binned out\n",
			chunk->queue_index);
		return -EINVAL;
	}

	if (hw_queue_prop->driver_only) {
		dev_err(hdev->dev,
			"Queue index %d is restricted for the kernel driver\n",
			chunk->queue_index);
		return -EINVAL;
	}

	/* When hw queue type isn't QUEUE_TYPE_HW,
	 * USER_ALLOC_CB flag is treated as "don't care".
	 */
	if (hw_queue_prop->type == QUEUE_TYPE_HW) {
		if (chunk->cs_chunk_flags & HL_CS_CHUNK_FLAGS_USER_ALLOC_CB) {
			if (!(hw_queue_prop->cb_alloc_flags & CB_ALLOC_USER)) {
				dev_err(hdev->dev,
					"Queue index %d doesn't support user CB\n",
					chunk->queue_index);
				return -EINVAL;
			}

			*is_kernel_allocated_cb = false;
		} else {
			if (!(hw_queue_prop->cb_alloc_flags &
					CB_ALLOC_KERNEL)) {
				dev_err(hdev->dev,
					"Queue index %d doesn't support kernel CB\n",
					chunk->queue_index);
				return -EINVAL;
			}

			*is_kernel_allocated_cb = true;
		}
	} else {
		*is_kernel_allocated_cb = !!(hw_queue_prop->cb_alloc_flags
						& CB_ALLOC_KERNEL);
	}

	*queue_type = hw_queue_prop->type;
	return 0;
}

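/*
 * get_cb_from_cs_chunk - look up the CB referenced by a CS chunk, validate
 *                        its size and take a CS reference on it.
 *
 * @hdev: pointer to device structure
 * @mmg: memory manager to look up the CB handle in
 * @chunk: the user CS chunk
 *
 * Returns the CB pointer on success, NULL on failure.
 */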
static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
					struct hl_mem_mgr *mmg,
					struct hl_cs_chunk *chunk)
{
	struct hl_cb *cb;

	cb = hl_cb_get(mmg, chunk->cb_handle);
	if (!cb) {
		dev_err(hdev->dev, "CB handle 0x%llx invalid\n", chunk->cb_handle);
		return NULL;
	}

	if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
		dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
		goto release_cb;
	}

	atomic_inc(&cb->cs_cnt);

	return cb;

release_cb:
	hl_cb_put(cb);
	return NULL;
}

struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
		enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
{
	struct hl_cs_job *job;

	job = kzalloc(sizeof(*job), GFP_ATOMIC);
	if (!job)
		job = kzalloc(sizeof(*job), GFP_KERNEL);

	if (!job)
		return NULL;

	kref_init(&job->refcount);
	job->queue_type = queue_type;
	job->is_kernel_allocated_cb = is_kernel_allocated_cb;

	if (is_cb_patched(hdev, job))
		INIT_LIST_HEAD(&job->userptr_list);

	if (job->queue_type == QUEUE_TYPE_EXT)
		INIT_WORK(&job->finish_work, job_wq_completion);

	return job;
}

static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
{
	if (cs_type_flags & HL_CS_FLAGS_SIGNAL)
		return CS_TYPE_SIGNAL;
	else if (cs_type_flags & HL_CS_FLAGS_WAIT)
		return CS_TYPE_WAIT;
	else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT)
		return CS_TYPE_COLLECTIVE_WAIT;
	else if (cs_type_flags & HL_CS_FLAGS_RESERVE_SIGNALS_ONLY)
		return CS_RESERVE_SIGNALS;
	else if (cs_type_flags & HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY)
		return CS_UNRESERVE_SIGNALS;
	else if (cs_type_flags & HL_CS_FLAGS_ENGINE_CORE_COMMAND)
		return CS_TYPE_ENGINE_CORE;
	else if (cs_type_flags & HL_CS_FLAGS_ENGINES_COMMAND)
		return CS_TYPE_ENGINES;
	else if (cs_type_flags & HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
		return CS_TYPE_FLUSH_PCI_HBW_WRITES;
	else
		return CS_TYPE_DEFAULT;
}

static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	u32 cs_type_flags, num_chunks;
	enum hl_device_status status;
	enum hl_cs_type cs_type;
	bool is_sync_stream;
	int i;

	for (i = 0 ; i < sizeof(args->in.pad) ; i++)
		if (args->in.pad[i]) {
			dev_dbg(hdev->dev, "Padding bytes must be 0\n");
			return -EINVAL;
		}

	if (!hl_device_operational(hdev, &status))
		return -EBUSY;

	if ((args->in.cs_flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
			!hdev->supports_staged_submission) {
		dev_err(hdev->dev, "staged submission not supported");
		return -EPERM;
	}

	cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK;

	if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) {
		dev_err(hdev->dev,
			"CS type flags are mutually exclusive, context %d\n",
			ctx->asid);
		return -EINVAL;
	}

	cs_type = hl_cs_get_cs_type(cs_type_flags);
	num_chunks = args->in.num_chunks_execute;

	is_sync_stream = (cs_type == CS_TYPE_SIGNAL || cs_type == CS_TYPE_WAIT ||
			cs_type == CS_TYPE_COLLECTIVE_WAIT);

	if (unlikely(is_sync_stream && !hdev->supports_sync_stream)) {
		dev_err(hdev->dev, "Sync stream CS is not supported\n");
		return -EINVAL;
	}

	if (cs_type == CS_TYPE_DEFAULT) {
		if (!num_chunks) {
			dev_err(hdev->dev, "Got execute CS with 0 chunks, context %d\n", ctx->asid);
			return -EINVAL;
		}
	} else if (is_sync_stream && num_chunks != 1) {
		dev_err(hdev->dev,
			"Sync stream CS mandates one chunk only, context %d\n",
			ctx->asid);
		return -EINVAL;
	}

	return 0;
}

static int hl_cs_copy_chunk_array(struct hl_device *hdev,
					struct hl_cs_chunk **cs_chunk_array,
					void __user *chunks, u32 num_chunks,
					struct hl_ctx *ctx)
{
	u32 size_to_copy;

	if (num_chunks > HL_MAX_JOBS_PER_CS) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
		dev_err(hdev->dev,
			"Number of chunks can NOT be larger than %d\n",
			HL_MAX_JOBS_PER_CS);
		return -EINVAL;
	}

	*cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
					GFP_ATOMIC);
	if (!*cs_chunk_array)
		*cs_chunk_array = kmalloc_array(num_chunks,
					sizeof(**cs_chunk_array), GFP_KERNEL);
	if (!*cs_chunk_array) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
		return -ENOMEM;
	}

	size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
	if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
		dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
		kfree(*cs_chunk_array);
		return -EFAULT;
	}

	return 0;
}

static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
				u64 sequence, u32 flags,
				u32 encaps_signal_handle)
{
	if (!(flags & HL_CS_FLAGS_STAGED_SUBMISSION))
		return 0;

	cs->staged_last = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_LAST);
	cs->staged_first = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST);

	if (cs->staged_first) {
		/* Staged CS sequence is the first CS sequence */
		INIT_LIST_HEAD(&cs->staged_cs_node);
		cs->staged_sequence = cs->sequence;

		if (cs->encaps_signals)
			cs->encaps_sig_hdl_id = encaps_signal_handle;
	} else {
		/* User sequence will be validated in 'hl_hw_queue_schedule_cs'
		 * under the cs_mirror_lock
		 */
		cs->staged_sequence = sequence;
	}

	/* Increment CS reference if needed */
	staged_cs_get(hdev, cs);

	cs->staged_cs = true;

	return 0;
}

static u32 get_stream_master_qid_mask(struct hl_device *hdev, u32 qid)
{
	int i;

	for (i = 0; i < hdev->stream_master_qid_arr_size; i++)
		if (qid == hdev->stream_master_qid_arr[i])
			return BIT(i);

	return 0;
}

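/*
 * cs_ioctl_default - handle a default (execute) CS submission.
 *
 * @hpriv: pointer to the private data of the fd
 * @chunks: user pointer to the array of CS chunks
 * @num_chunks: number of chunks in the array
 * @cs_seq: in/out CS sequence number
 * @flags: CS flags from the IOCTL
 * @encaps_signals_handle: handle of encapsulated signals, if used
 * @timeout: CS timeout in jiffies
 * @signal_initial_sob_count: filled with the initial SOB count of the CS
 *
 * Allocates a CS, builds a job per chunk, parses the jobs and schedules the
 * CS on the H/W queues.
 */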
static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
				u32 num_chunks, u64 *cs_seq, u32 flags,
				u32 encaps_signals_handle, u32 timeout,
				u16 *signal_initial_sob_count)
{
	bool staged_mid, int_queues_only = true, using_hw_queues = false;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_chunk *cs_chunk_array;
	struct hl_cs_counters_atomic *cntr;
	struct hl_ctx *ctx = hpriv->ctx;
	struct hl_cs_job *job;
	struct hl_cs *cs;
	struct hl_cb *cb;
	u64 user_sequence;
	u8 stream_master_qid_map = 0;
	int rc, i;

	cntr = &hdev->aggregated_cs_counters;
	user_sequence = *cs_seq;
	*cs_seq = ULLONG_MAX;

	rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
			hpriv->ctx);
	if (rc)
		goto out;

	if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
			!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
		staged_mid = true;
	else
		staged_mid = false;

	rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT,
			staged_mid ? user_sequence : ULLONG_MAX, &cs, flags,
			timeout);
	if (rc)
		goto free_cs_chunk_array;

	*cs_seq = cs->sequence;

	hl_debugfs_add_cs(cs);

	rc = cs_staged_submission(hdev, cs, user_sequence, flags,
						encaps_signals_handle);
	if (rc)
		goto free_cs_object;

	/* If this is a staged submission we must return the staged sequence
	 * rather than the internal CS sequence
	 */
	if (cs->staged_cs)
		*cs_seq = cs->staged_sequence;

	/* Validate ALL the CS chunks before submitting the CS */
	for (i = 0 ; i < num_chunks ; i++) {
		struct hl_cs_chunk *chunk = &cs_chunk_array[i];
		enum hl_queue_type queue_type;
		bool is_kernel_allocated_cb;

		rc = validate_queue_index(hdev, chunk, &queue_type,
						&is_kernel_allocated_cb);
		if (rc) {
			atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
			atomic64_inc(&cntr->validation_drop_cnt);
			goto free_cs_object;
		}

		if (is_kernel_allocated_cb) {
			cb = get_cb_from_cs_chunk(hdev, &hpriv->mem_mgr, chunk);
			if (!cb) {
				atomic64_inc(
					&ctx->cs_counters.validation_drop_cnt);
				atomic64_inc(&cntr->validation_drop_cnt);
				rc = -EINVAL;
				goto free_cs_object;
			}
		} else {
			cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
		}

		if (queue_type == QUEUE_TYPE_EXT ||
				queue_type == QUEUE_TYPE_HW) {
			int_queues_only = false;

			/*
			 * store which streams are being used for external/HW
			 * queues of this CS
			 */
			if (hdev->supports_wait_for_multi_cs)
				stream_master_qid_map |=
					get_stream_master_qid_mask(hdev,
							chunk->queue_index);
		}

		if (queue_type == QUEUE_TYPE_HW)
			using_hw_queues = true;

		job = hl_cs_allocate_job(hdev, queue_type,
						is_kernel_allocated_cb);
		if (!job) {
			atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
			atomic64_inc(&cntr->out_of_mem_drop_cnt);
			dev_err(hdev->dev, "Failed to allocate a new job\n");
			rc = -ENOMEM;
			if (is_kernel_allocated_cb)
				goto release_cb;

			goto free_cs_object;
		}

		job->id = i + 1;
		job->cs = cs;
		job->user_cb = cb;
		job->user_cb_size = chunk->cb_size;
		job->hw_queue_id = chunk->queue_index;

		cs->jobs_in_queue_cnt[job->hw_queue_id]++;
		cs->jobs_cnt++;

		list_add_tail(&job->cs_node, &cs->job_list);

		/*
		 * Increment CS reference. When CS reference is 0, CS is
		 * done and can be signaled to user and free all its resources
		 * Only increment for JOB on external or H/W queues, because
		 * only for those JOBs we get completion
		 */
		if (cs_needs_completion(cs) &&
			(job->queue_type == QUEUE_TYPE_EXT ||
				job->queue_type == QUEUE_TYPE_HW))
			cs_get(cs);

		hl_debugfs_add_job(hdev, job);

		rc = cs_parser(hpriv, job);
		if (rc) {
			atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
			atomic64_inc(&cntr->parsing_drop_cnt);
			dev_err(hdev->dev,
				"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
				cs->ctx->asid, cs->sequence, job->id, rc);
			goto free_cs_object;
		}
	}

	/* We allow a CS with any queue type combination as long as it does
	 * not get a completion
	 */
	if (int_queues_only && cs_needs_completion(cs)) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&cntr->validation_drop_cnt);
		dev_err(hdev->dev,
			"Reject CS %d.%llu since it contains only internal queue jobs and needs completion\n",
			cs->ctx->asid, cs->sequence);
		rc = -EINVAL;
		goto free_cs_object;
	}

	if (using_hw_queues)
		INIT_WORK(&cs->finish_work, cs_completion);

	/*
	 * store the (external/HW queues) streams used by the CS in the
	 * fence object for multi-CS completion
	 */
	if (hdev->supports_wait_for_multi_cs)
		cs->fence->stream_master_qid_map = stream_master_qid_map;

	rc = hl_hw_queue_schedule_cs(cs);
	if (rc) {
		if (rc != -EAGAIN)
			dev_err(hdev->dev,
				"Failed to submit CS %d.%llu to H/W queues, error %d\n",
				cs->ctx->asid, cs->sequence, rc);
		goto free_cs_object;
	}

	*signal_initial_sob_count = cs->initial_sob_count;

	rc = HL_CS_STATUS_SUCCESS;
	goto put_cs;

release_cb:
	atomic_dec(&cb->cs_cnt);
	hl_cb_put(cb);
free_cs_object:
	cs_rollback(hdev, cs);
	*cs_seq = ULLONG_MAX;
	/* The path below is both for good and erroneous exits */
put_cs:
	/* We finished with the CS in this function, so put the ref */
	cs_put(cs);
free_cs_chunk_array:
	kfree(cs_chunk_array);
out:
	return rc;
}

static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
				u64 *cs_seq)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	bool need_soft_reset = false;
	int rc = 0, do_ctx_switch = 0;
	void __user *chunks;
	u32 num_chunks, tmp;
	u16 sob_count;
	int ret;

	if (hdev->supports_ctx_switch)
		do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);

	if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
		mutex_lock(&hpriv->restore_phase_mutex);

		if (do_ctx_switch) {
			rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
			if (rc) {
				dev_err_ratelimited(hdev->dev,
					"Failed to switch to context %d, rejecting CS! %d\n",
					ctx->asid, rc);
				/*
				 * If we timed out, or if the device is not IDLE
				 * while we want to do context-switch (-EBUSY),
				 * we need to soft-reset because QMAN is
				 * probably stuck. However, we can't call to
				 * reset here directly because of deadlock, so
				 * need to do it at the very end of this
				 * function
				 */
				if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
					need_soft_reset = true;
				mutex_unlock(&hpriv->restore_phase_mutex);
				goto out;
			}
		}

		hdev->asic_funcs->restore_phase_topology(hdev);

		chunks = (void __user *) (uintptr_t) args->in.chunks_restore;
		num_chunks = args->in.num_chunks_restore;

		if (!num_chunks) {
			dev_dbg(hdev->dev,
				"Need to run restore phase but restore CS is empty\n");
			rc = 0;
		} else {
			rc = cs_ioctl_default(hpriv, chunks, num_chunks,
					cs_seq, 0, 0, hdev->timeout_jiffies, &sob_count);
		}

		mutex_unlock(&hpriv->restore_phase_mutex);

		if (rc) {
			dev_err(hdev->dev,
				"Failed to submit restore CS for context %d (%d)\n",
				ctx->asid, rc);
			goto out;
		}

		/* Need to wait for restore completion before execution phase */
		if (num_chunks) {
			enum hl_cs_wait_status status;
wait_again:
			ret = _hl_cs_wait_ioctl(hdev, ctx,
					jiffies_to_usecs(hdev->timeout_jiffies),
					*cs_seq, &status, NULL);
			if (ret) {
				if (ret == -ERESTARTSYS) {
					usleep_range(100, 200);
					goto wait_again;
				}

				dev_err(hdev->dev,
					"Restore CS for context %d failed to complete %d\n",
					ctx->asid, ret);
				rc = -ENOEXEC;
				goto out;
			}
		}

		if (hdev->supports_ctx_switch)
			ctx->thread_ctx_switch_wait_token = 1;

	} else if (hdev->supports_ctx_switch && !ctx->thread_ctx_switch_wait_token) {
		rc = hl_poll_timeout_memory(hdev,
			&ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
			100, jiffies_to_usecs(hdev->timeout_jiffies), false);

		if (rc == -ETIMEDOUT) {
			dev_err(hdev->dev,
				"context switch phase timeout (%d)\n", tmp);
			goto out;
		}
	}

out:
	if ((rc == -ETIMEDOUT || rc == -EBUSY) && (need_soft_reset))
		hl_device_reset(hdev, 0);

	return rc;
}

/*
 * hl_cs_signal_sob_wraparound_handler: handle SOB value wraparound case.
 * if the SOB value reaches the max value move to the other SOB reserved
 * to the queue.
 * @hdev: pointer to device structure
 * @q_idx: stream queue index
 * @hw_sob: the H/W SOB used in this signal CS.
 * @count: signals count
 * @encaps_sig: tells whether it's reservation for encaps signals or not.
 *
 * Note that this function must be called while hw_queues_lock is taken.
 */
int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
			struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig)

{
	struct hl_sync_stream_properties *prop;
	struct hl_hw_sob *sob = *hw_sob, *other_sob;
	u8 other_sob_offset;

	prop = &hdev->kernel_queues[q_idx].sync_stream_prop;

	hw_sob_get(sob);

	/* check for wraparound */
	if (prop->next_sob_val + count >= HL_MAX_SOB_VAL) {
		/*
		 * Decrement as we reached the max value.
		 * The release function won't be called here as we've
		 * just incremented the refcount right before calling this
		 * function.
		 */
		hw_sob_put_err(sob);

		/*
		 * check the other sob value, if it is still in use then fail,
		 * otherwise make the switch
		 */
		other_sob_offset = (prop->curr_sob_offset + 1) % HL_RSVD_SOBS;
		other_sob = &prop->hw_sob[other_sob_offset];

		if (kref_read(&other_sob->kref) != 1) {
			dev_err(hdev->dev, "error: Cannot switch SOBs q_idx: %d\n",
								q_idx);
			return -EINVAL;
		}

		/*
		 * next_sob_val always points to the next available signal
		 * in the sob, so in encaps signals it will be the next one
		 * after reserving the required amount.
		 */
		if (encaps_sig)
			prop->next_sob_val = count + 1;
		else
			prop->next_sob_val = count;

		/* only two SOBs are currently in use */
		prop->curr_sob_offset = other_sob_offset;
		*hw_sob = other_sob;

		/*
		 * check if other_sob needs reset, then do it before using it
		 * for the reservation or the next signal cs.
		 * we do it here, and for both encaps and regular signal cs
		 * cases in order to avoid possible races of two kref_put
		 * of the sob which can occur at the same time if we move the
		 * sob reset(kref_put) to cs_do_release function.
		 * in addition, if we have combination of cs signal and
		 * encaps, and at the point we need to reset the sob there was
		 * no more reservations and only signal cs keep coming,
		 * in such case we need signal_cs to put the refcount and
		 * reset the sob.
		 */
		if (other_sob->need_reset)
			hw_sob_put(other_sob);

		if (encaps_sig) {
			/* set reset indication for the sob */
			sob->need_reset = true;
			hw_sob_get(other_sob);
		}

		dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
				prop->curr_sob_offset, q_idx);
	} else {
		prop->next_sob_val += count;
	}

	return 0;
}

static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
		struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx,
		bool encaps_signals)
{
	u64 *signal_seq_arr = NULL;
	u32 size_to_copy, signal_seq_arr_len;
	int rc = 0;

	if (encaps_signals) {
		*signal_seq = chunk->encaps_signal_seq;
		return 0;
	}

	signal_seq_arr_len = chunk->num_signal_seq_arr;

	/* currently only one signal seq is supported */
	if (signal_seq_arr_len != 1) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
		dev_err(hdev->dev,
			"Wait for signal CS supports only one signal CS seq\n");
		return -EINVAL;
	}

	signal_seq_arr = kmalloc_array(signal_seq_arr_len,
					sizeof(*signal_seq_arr),
					GFP_ATOMIC);
	if (!signal_seq_arr)
		signal_seq_arr = kmalloc_array(signal_seq_arr_len,
						sizeof(*signal_seq_arr),
						GFP_KERNEL);
	if (!signal_seq_arr) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
		return -ENOMEM;
	}

	size_to_copy = signal_seq_arr_len * sizeof(*signal_seq_arr);
	if (copy_from_user(signal_seq_arr,
				u64_to_user_ptr(chunk->signal_seq_arr),
				size_to_copy)) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
		dev_err(hdev->dev,
			"Failed to copy signal seq array from user\n");
		rc = -EFAULT;
		goto out;
	}

	/* currently it is guaranteed to have only one signal seq */
	*signal_seq = signal_seq_arr[0];

out:
	kfree(signal_seq_arr);

	return rc;
}

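/*
 * cs_ioctl_signal_wait_create_jobs - create the single kernel-CB job used by
 *                                    a signal/wait CS.
 *
 * @hdev: pointer to device structure
 * @ctx: context this CS belongs to
 * @cs: the signal/wait CS
 * @q_type: queue type of the target queue
 * @q_idx: index of the target queue
 * @encaps_signal_offset: signal offset inside an encapsulated signals handle
 *
 * The kernel CB is already patched, so it is attached to the job as-is and
 * no parsing is needed.
 */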
1919 *signal_seq = signal_seq_arr[0]; 1920 1921 out: 1922 kfree(signal_seq_arr); 1923 1924 return rc; 1925 } 1926 1927 static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev, 1928 struct hl_ctx *ctx, struct hl_cs *cs, 1929 enum hl_queue_type q_type, u32 q_idx, u32 encaps_signal_offset) 1930 { 1931 struct hl_cs_counters_atomic *cntr; 1932 struct hl_cs_job *job; 1933 struct hl_cb *cb; 1934 u32 cb_size; 1935 1936 cntr = &hdev->aggregated_cs_counters; 1937 1938 job = hl_cs_allocate_job(hdev, q_type, true); 1939 if (!job) { 1940 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); 1941 atomic64_inc(&cntr->out_of_mem_drop_cnt); 1942 dev_err(hdev->dev, "Failed to allocate a new job\n"); 1943 return -ENOMEM; 1944 } 1945 1946 if (cs->type == CS_TYPE_WAIT) 1947 cb_size = hdev->asic_funcs->get_wait_cb_size(hdev); 1948 else 1949 cb_size = hdev->asic_funcs->get_signal_cb_size(hdev); 1950 1951 cb = hl_cb_kernel_create(hdev, cb_size, 1952 q_type == QUEUE_TYPE_HW && hdev->mmu_enable); 1953 if (!cb) { 1954 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); 1955 atomic64_inc(&cntr->out_of_mem_drop_cnt); 1956 kfree(job); 1957 return -EFAULT; 1958 } 1959 1960 job->id = 0; 1961 job->cs = cs; 1962 job->user_cb = cb; 1963 atomic_inc(&job->user_cb->cs_cnt); 1964 job->user_cb_size = cb_size; 1965 job->hw_queue_id = q_idx; 1966 1967 if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT) 1968 && cs->encaps_signals) 1969 job->encaps_sig_wait_offset = encaps_signal_offset; 1970 /* 1971 * No need in parsing, user CB is the patched CB. 1972 * We call hl_cb_destroy() out of two reasons - we don't need the CB in 1973 * the CB idr anymore and to decrement its refcount as it was 1974 * incremented inside hl_cb_kernel_create(). 1975 */ 1976 job->patched_cb = job->user_cb; 1977 job->job_cb_size = job->user_cb_size; 1978 hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle); 1979 1980 /* increment refcount as for external queues we get completion */ 1981 cs_get(cs); 1982 1983 cs->jobs_in_queue_cnt[job->hw_queue_id]++; 1984 cs->jobs_cnt++; 1985 1986 list_add_tail(&job->cs_node, &cs->job_list); 1987 1988 hl_debugfs_add_job(hdev, job); 1989 1990 return 0; 1991 } 1992 1993 static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv, 1994 u32 q_idx, u32 count, 1995 u32 *handle_id, u32 *sob_addr, 1996 u32 *signals_count) 1997 { 1998 struct hw_queue_properties *hw_queue_prop; 1999 struct hl_sync_stream_properties *prop; 2000 struct hl_device *hdev = hpriv->hdev; 2001 struct hl_cs_encaps_sig_handle *handle; 2002 struct hl_encaps_signals_mgr *mgr; 2003 struct hl_hw_sob *hw_sob; 2004 int hdl_id; 2005 int rc = 0; 2006 2007 if (count >= HL_MAX_SOB_VAL) { 2008 dev_err(hdev->dev, "signals count(%u) exceeds the max SOB value\n", 2009 count); 2010 rc = -EINVAL; 2011 goto out; 2012 } 2013 2014 if (q_idx >= hdev->asic_prop.max_queues) { 2015 dev_err(hdev->dev, "Queue index %d is invalid\n", 2016 q_idx); 2017 rc = -EINVAL; 2018 goto out; 2019 } 2020 2021 hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx]; 2022 2023 if (!hw_queue_prop->supports_sync_stream) { 2024 dev_err(hdev->dev, 2025 "Queue index %d does not support sync stream operations\n", 2026 q_idx); 2027 rc = -EINVAL; 2028 goto out; 2029 } 2030 2031 prop = &hdev->kernel_queues[q_idx].sync_stream_prop; 2032 2033 handle = kzalloc(sizeof(*handle), GFP_KERNEL); 2034 if (!handle) { 2035 rc = -ENOMEM; 2036 goto out; 2037 } 2038 2039 handle->count = count; 2040 2041 hl_ctx_get(hpriv->ctx); 2042 handle->ctx = hpriv->ctx; 2043 mgr = &hpriv->ctx->sig_mgr; 2044 
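	/*
	 * The handle allocated above is the anchor of the reservation: the IDR
	 * id handed back in *handle_id is what user-space later passes to
	 * cs_ioctl_unreserve_signals(), and wait CSs locate the same handle by
	 * its cs_seq in cs_ioctl_signal_wait(). It is released either directly
	 * on unreserve or via its kref once the wait CSs that took a reference
	 * are done with it.
	 */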
2045 spin_lock(&mgr->lock); 2046 hdl_id = idr_alloc(&mgr->handles, handle, 1, 0, GFP_ATOMIC); 2047 spin_unlock(&mgr->lock); 2048 2049 if (hdl_id < 0) { 2050 dev_err(hdev->dev, "Failed to allocate IDR for a new signal reservation\n"); 2051 rc = -EINVAL; 2052 goto put_ctx; 2053 } 2054 2055 handle->id = hdl_id; 2056 handle->q_idx = q_idx; 2057 handle->hdev = hdev; 2058 kref_init(&handle->refcount); 2059 2060 hdev->asic_funcs->hw_queues_lock(hdev); 2061 2062 hw_sob = &prop->hw_sob[prop->curr_sob_offset]; 2063 2064 /* 2065 * Increment the SOB value by count by user request 2066 * to reserve those signals 2067 * check if the signals amount to reserve is not exceeding the max sob 2068 * value, if yes then switch sob. 2069 */ 2070 rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, count, 2071 true); 2072 if (rc) { 2073 dev_err(hdev->dev, "Failed to switch SOB\n"); 2074 hdev->asic_funcs->hw_queues_unlock(hdev); 2075 rc = -EINVAL; 2076 goto remove_idr; 2077 } 2078 /* set the hw_sob to the handle after calling the sob wraparound handler 2079 * since sob could have changed. 2080 */ 2081 handle->hw_sob = hw_sob; 2082 2083 /* store the current sob value for unreserve validity check, and 2084 * signal offset support 2085 */ 2086 handle->pre_sob_val = prop->next_sob_val - handle->count; 2087 2088 handle->cs_seq = ULLONG_MAX; 2089 2090 *signals_count = prop->next_sob_val; 2091 hdev->asic_funcs->hw_queues_unlock(hdev); 2092 2093 *sob_addr = handle->hw_sob->sob_addr; 2094 *handle_id = hdl_id; 2095 2096 dev_dbg(hdev->dev, 2097 "Signals reserved, sob_id: %d, sob addr: 0x%x, last sob_val: %u, q_idx: %d, hdl_id: %d\n", 2098 hw_sob->sob_id, handle->hw_sob->sob_addr, 2099 prop->next_sob_val - 1, q_idx, hdl_id); 2100 goto out; 2101 2102 remove_idr: 2103 spin_lock(&mgr->lock); 2104 idr_remove(&mgr->handles, hdl_id); 2105 spin_unlock(&mgr->lock); 2106 2107 put_ctx: 2108 hl_ctx_put(handle->ctx); 2109 kfree(handle); 2110 2111 out: 2112 return rc; 2113 } 2114 2115 static int cs_ioctl_unreserve_signals(struct hl_fpriv *hpriv, u32 handle_id) 2116 { 2117 struct hl_cs_encaps_sig_handle *encaps_sig_hdl; 2118 struct hl_sync_stream_properties *prop; 2119 struct hl_device *hdev = hpriv->hdev; 2120 struct hl_encaps_signals_mgr *mgr; 2121 struct hl_hw_sob *hw_sob; 2122 u32 q_idx, sob_addr; 2123 int rc = 0; 2124 2125 mgr = &hpriv->ctx->sig_mgr; 2126 2127 spin_lock(&mgr->lock); 2128 encaps_sig_hdl = idr_find(&mgr->handles, handle_id); 2129 if (encaps_sig_hdl) { 2130 dev_dbg(hdev->dev, "unreserve signals, handle: %u, SOB:0x%x, count: %u\n", 2131 handle_id, encaps_sig_hdl->hw_sob->sob_addr, 2132 encaps_sig_hdl->count); 2133 2134 hdev->asic_funcs->hw_queues_lock(hdev); 2135 2136 q_idx = encaps_sig_hdl->q_idx; 2137 prop = &hdev->kernel_queues[q_idx].sync_stream_prop; 2138 hw_sob = &prop->hw_sob[prop->curr_sob_offset]; 2139 sob_addr = hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id); 2140 2141 /* Check if sob_val got out of sync due to other 2142 * signal submission requests which were handled 2143 * between the reserve-unreserve calls or SOB switch 2144 * upon reaching SOB max value. 
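	 *
	 * For example: a reservation recorded with pre_sob_val == 5 and
	 * count == 3 expects next_sob_val to still be 8 at this point; any
	 * other value, or a different current SOB address, means other signal
	 * submissions (or a SOB switch) happened in between and the
	 * reservation can no longer be rolled back safely.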
2145 */ 2146 if (encaps_sig_hdl->pre_sob_val + encaps_sig_hdl->count 2147 != prop->next_sob_val || 2148 sob_addr != encaps_sig_hdl->hw_sob->sob_addr) { 2149 dev_err(hdev->dev, "Cannot unreserve signals, SOB val ran out of sync, expected: %u, actual val: %u\n", 2150 encaps_sig_hdl->pre_sob_val, 2151 (prop->next_sob_val - encaps_sig_hdl->count)); 2152 2153 hdev->asic_funcs->hw_queues_unlock(hdev); 2154 rc = -EINVAL; 2155 goto out; 2156 } 2157 2158 /* 2159 * Decrement the SOB value by count by user request 2160 * to unreserve those signals 2161 */ 2162 prop->next_sob_val -= encaps_sig_hdl->count; 2163 2164 hdev->asic_funcs->hw_queues_unlock(hdev); 2165 2166 hw_sob_put(hw_sob); 2167 2168 /* Release the id and free allocated memory of the handle */ 2169 idr_remove(&mgr->handles, handle_id); 2170 hl_ctx_put(encaps_sig_hdl->ctx); 2171 kfree(encaps_sig_hdl); 2172 } else { 2173 rc = -EINVAL; 2174 dev_err(hdev->dev, "failed to unreserve signals, cannot find handler\n"); 2175 } 2176 out: 2177 spin_unlock(&mgr->lock); 2178 2179 return rc; 2180 } 2181 2182 static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, 2183 void __user *chunks, u32 num_chunks, 2184 u64 *cs_seq, u32 flags, u32 timeout, 2185 u32 *signal_sob_addr_offset, u16 *signal_initial_sob_count) 2186 { 2187 struct hl_cs_encaps_sig_handle *encaps_sig_hdl = NULL; 2188 bool handle_found = false, is_wait_cs = false, 2189 wait_cs_submitted = false, 2190 cs_encaps_signals = false; 2191 struct hl_cs_chunk *cs_chunk_array, *chunk; 2192 bool staged_cs_with_encaps_signals = false; 2193 struct hw_queue_properties *hw_queue_prop; 2194 struct hl_device *hdev = hpriv->hdev; 2195 struct hl_cs_compl *sig_waitcs_cmpl; 2196 u32 q_idx, collective_engine_id = 0; 2197 struct hl_cs_counters_atomic *cntr; 2198 struct hl_fence *sig_fence = NULL; 2199 struct hl_ctx *ctx = hpriv->ctx; 2200 enum hl_queue_type q_type; 2201 struct hl_cs *cs; 2202 u64 signal_seq; 2203 int rc; 2204 2205 cntr = &hdev->aggregated_cs_counters; 2206 *cs_seq = ULLONG_MAX; 2207 2208 rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks, 2209 ctx); 2210 if (rc) 2211 goto out; 2212 2213 /* currently it is guaranteed to have only one chunk */ 2214 chunk = &cs_chunk_array[0]; 2215 2216 if (chunk->queue_index >= hdev->asic_prop.max_queues) { 2217 atomic64_inc(&ctx->cs_counters.validation_drop_cnt); 2218 atomic64_inc(&cntr->validation_drop_cnt); 2219 dev_err(hdev->dev, "Queue index %d is invalid\n", 2220 chunk->queue_index); 2221 rc = -EINVAL; 2222 goto free_cs_chunk_array; 2223 } 2224 2225 q_idx = chunk->queue_index; 2226 hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx]; 2227 q_type = hw_queue_prop->type; 2228 2229 if (!hw_queue_prop->supports_sync_stream) { 2230 atomic64_inc(&ctx->cs_counters.validation_drop_cnt); 2231 atomic64_inc(&cntr->validation_drop_cnt); 2232 dev_err(hdev->dev, 2233 "Queue index %d does not support sync stream operations\n", 2234 q_idx); 2235 rc = -EINVAL; 2236 goto free_cs_chunk_array; 2237 } 2238 2239 if (cs_type == CS_TYPE_COLLECTIVE_WAIT) { 2240 if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) { 2241 atomic64_inc(&ctx->cs_counters.validation_drop_cnt); 2242 atomic64_inc(&cntr->validation_drop_cnt); 2243 dev_err(hdev->dev, 2244 "Queue index %d is invalid\n", q_idx); 2245 rc = -EINVAL; 2246 goto free_cs_chunk_array; 2247 } 2248 2249 if (!hdev->nic_ports_mask) { 2250 atomic64_inc(&ctx->cs_counters.validation_drop_cnt); 2251 atomic64_inc(&cntr->validation_drop_cnt); 2252 dev_err(hdev->dev, 2253 "Collective 
operations not supported when NIC ports are disabled"); 2254 rc = -EINVAL; 2255 goto free_cs_chunk_array; 2256 } 2257 2258 collective_engine_id = chunk->collective_engine_id; 2259 } 2260 2261 is_wait_cs = !!(cs_type == CS_TYPE_WAIT || 2262 cs_type == CS_TYPE_COLLECTIVE_WAIT); 2263 2264 cs_encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS); 2265 2266 if (is_wait_cs) { 2267 rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq, 2268 ctx, cs_encaps_signals); 2269 if (rc) 2270 goto free_cs_chunk_array; 2271 2272 if (cs_encaps_signals) { 2273 /* check if cs sequence has encapsulated 2274 * signals handle 2275 */ 2276 struct idr *idp; 2277 u32 id; 2278 2279 spin_lock(&ctx->sig_mgr.lock); 2280 idp = &ctx->sig_mgr.handles; 2281 idr_for_each_entry(idp, encaps_sig_hdl, id) { 2282 if (encaps_sig_hdl->cs_seq == signal_seq) { 2283 /* get refcount to protect removing this handle from idr, 2284 * needed when multiple wait cs are used with offset 2285 * to wait on reserved encaps signals. 2286 * Since kref_put of this handle is executed outside the 2287 * current lock, it is possible that the handle refcount 2288 * is 0 but it yet to be removed from the list. In this 2289 * case need to consider the handle as not valid. 2290 */ 2291 if (kref_get_unless_zero(&encaps_sig_hdl->refcount)) 2292 handle_found = true; 2293 break; 2294 } 2295 } 2296 spin_unlock(&ctx->sig_mgr.lock); 2297 2298 if (!handle_found) { 2299 /* treat as signal CS already finished */ 2300 dev_dbg(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n", 2301 signal_seq); 2302 rc = 0; 2303 goto free_cs_chunk_array; 2304 } 2305 2306 /* validate also the signal offset value */ 2307 if (chunk->encaps_signal_offset > 2308 encaps_sig_hdl->count) { 2309 dev_err(hdev->dev, "offset(%u) value exceed max reserved signals count(%u)!\n", 2310 chunk->encaps_signal_offset, 2311 encaps_sig_hdl->count); 2312 rc = -EINVAL; 2313 goto free_cs_chunk_array; 2314 } 2315 } 2316 2317 sig_fence = hl_ctx_get_fence(ctx, signal_seq); 2318 if (IS_ERR(sig_fence)) { 2319 atomic64_inc(&ctx->cs_counters.validation_drop_cnt); 2320 atomic64_inc(&cntr->validation_drop_cnt); 2321 dev_err(hdev->dev, 2322 "Failed to get signal CS with seq 0x%llx\n", 2323 signal_seq); 2324 rc = PTR_ERR(sig_fence); 2325 goto free_cs_chunk_array; 2326 } 2327 2328 if (!sig_fence) { 2329 /* signal CS already finished */ 2330 rc = 0; 2331 goto free_cs_chunk_array; 2332 } 2333 2334 sig_waitcs_cmpl = 2335 container_of(sig_fence, struct hl_cs_compl, base_fence); 2336 2337 staged_cs_with_encaps_signals = !! 2338 (sig_waitcs_cmpl->type == CS_TYPE_DEFAULT && 2339 (flags & HL_CS_FLAGS_ENCAP_SIGNALS)); 2340 2341 if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL && 2342 !staged_cs_with_encaps_signals) { 2343 atomic64_inc(&ctx->cs_counters.validation_drop_cnt); 2344 atomic64_inc(&cntr->validation_drop_cnt); 2345 dev_err(hdev->dev, 2346 "CS seq 0x%llx is not of a signal/encaps-signal CS\n", 2347 signal_seq); 2348 hl_fence_put(sig_fence); 2349 rc = -EINVAL; 2350 goto free_cs_chunk_array; 2351 } 2352 2353 if (completion_done(&sig_fence->completion)) { 2354 /* signal CS already finished */ 2355 hl_fence_put(sig_fence); 2356 rc = 0; 2357 goto free_cs_chunk_array; 2358 } 2359 } 2360 2361 rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout); 2362 if (rc) { 2363 if (is_wait_cs) 2364 hl_fence_put(sig_fence); 2365 2366 goto free_cs_chunk_array; 2367 } 2368 2369 /* 2370 * Save the signal CS fence for later initialization right before 2371 * hanging the wait CS on the queue. 
2372 * for encaps signals case, we save the cs sequence and handle pointer 2373 * for later initialization. 2374 */ 2375 if (is_wait_cs) { 2376 cs->signal_fence = sig_fence; 2377 /* store the handle pointer, so we don't have to 2378 * look for it again, later on the flow 2379 * when we need to set SOB info in hw_queue. 2380 */ 2381 if (cs->encaps_signals) 2382 cs->encaps_sig_hdl = encaps_sig_hdl; 2383 } 2384 2385 hl_debugfs_add_cs(cs); 2386 2387 *cs_seq = cs->sequence; 2388 2389 if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL) 2390 rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type, 2391 q_idx, chunk->encaps_signal_offset); 2392 else if (cs_type == CS_TYPE_COLLECTIVE_WAIT) 2393 rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx, 2394 cs, q_idx, collective_engine_id, 2395 chunk->encaps_signal_offset); 2396 else { 2397 atomic64_inc(&ctx->cs_counters.validation_drop_cnt); 2398 atomic64_inc(&cntr->validation_drop_cnt); 2399 rc = -EINVAL; 2400 } 2401 2402 if (rc) 2403 goto free_cs_object; 2404 2405 if (q_type == QUEUE_TYPE_HW) 2406 INIT_WORK(&cs->finish_work, cs_completion); 2407 2408 rc = hl_hw_queue_schedule_cs(cs); 2409 if (rc) { 2410 /* In case wait cs failed here, it means the signal cs 2411 * already completed. we want to free all it's related objects 2412 * but we don't want to fail the ioctl. 2413 */ 2414 if (is_wait_cs) 2415 rc = 0; 2416 else if (rc != -EAGAIN) 2417 dev_err(hdev->dev, 2418 "Failed to submit CS %d.%llu to H/W queues, error %d\n", 2419 ctx->asid, cs->sequence, rc); 2420 goto free_cs_object; 2421 } 2422 2423 *signal_sob_addr_offset = cs->sob_addr_offset; 2424 *signal_initial_sob_count = cs->initial_sob_count; 2425 2426 rc = HL_CS_STATUS_SUCCESS; 2427 if (is_wait_cs) 2428 wait_cs_submitted = true; 2429 goto put_cs; 2430 2431 free_cs_object: 2432 cs_rollback(hdev, cs); 2433 *cs_seq = ULLONG_MAX; 2434 /* The path below is both for good and erroneous exits */ 2435 put_cs: 2436 /* We finished with the CS in this function, so put the ref */ 2437 cs_put(cs); 2438 free_cs_chunk_array: 2439 if (!wait_cs_submitted && cs_encaps_signals && handle_found && is_wait_cs) 2440 kref_put(&encaps_sig_hdl->refcount, hl_encaps_release_handle_and_put_ctx); 2441 kfree(cs_chunk_array); 2442 out: 2443 return rc; 2444 } 2445 2446 static int cs_ioctl_engine_cores(struct hl_fpriv *hpriv, u64 engine_cores, 2447 u32 num_engine_cores, u32 core_command) 2448 { 2449 struct hl_device *hdev = hpriv->hdev; 2450 void __user *engine_cores_arr; 2451 u32 *cores; 2452 int rc; 2453 2454 if (!hdev->asic_prop.supports_engine_modes) 2455 return -EPERM; 2456 2457 if (!num_engine_cores || num_engine_cores > hdev->asic_prop.num_engine_cores) { 2458 dev_err(hdev->dev, "Number of engine cores %d is invalid\n", num_engine_cores); 2459 return -EINVAL; 2460 } 2461 2462 if (core_command != HL_ENGINE_CORE_RUN && core_command != HL_ENGINE_CORE_HALT) { 2463 dev_err(hdev->dev, "Engine core command is invalid\n"); 2464 return -EINVAL; 2465 } 2466 2467 engine_cores_arr = (void __user *) (uintptr_t) engine_cores; 2468 cores = kmalloc_array(num_engine_cores, sizeof(u32), GFP_KERNEL); 2469 if (!cores) 2470 return -ENOMEM; 2471 2472 if (copy_from_user(cores, engine_cores_arr, num_engine_cores * sizeof(u32))) { 2473 dev_err(hdev->dev, "Failed to copy core-ids array from user\n"); 2474 kfree(cores); 2475 return -EFAULT; 2476 } 2477 2478 rc = hdev->asic_funcs->set_engine_cores(hdev, cores, num_engine_cores, core_command); 2479 kfree(cores); 2480 2481 return rc; 2482 } 2483 2484 static int 
cs_ioctl_engines(struct hl_fpriv *hpriv, u64 engines_arr_user_addr, 2485 u32 num_engines, enum hl_engine_command command) 2486 { 2487 struct hl_device *hdev = hpriv->hdev; 2488 u32 *engines, max_num_of_engines; 2489 void __user *engines_arr; 2490 int rc; 2491 2492 if (!hdev->asic_prop.supports_engine_modes) 2493 return -EPERM; 2494 2495 if (command >= HL_ENGINE_COMMAND_MAX) { 2496 dev_err(hdev->dev, "Engine command is invalid\n"); 2497 return -EINVAL; 2498 } 2499 2500 max_num_of_engines = hdev->asic_prop.max_num_of_engines; 2501 if (command == HL_ENGINE_CORE_RUN || command == HL_ENGINE_CORE_HALT) 2502 max_num_of_engines = hdev->asic_prop.num_engine_cores; 2503 2504 if (!num_engines || num_engines > max_num_of_engines) { 2505 dev_err(hdev->dev, "Number of engines %d is invalid\n", num_engines); 2506 return -EINVAL; 2507 } 2508 2509 engines_arr = (void __user *) (uintptr_t) engines_arr_user_addr; 2510 engines = kmalloc_array(num_engines, sizeof(u32), GFP_KERNEL); 2511 if (!engines) 2512 return -ENOMEM; 2513 2514 if (copy_from_user(engines, engines_arr, num_engines * sizeof(u32))) { 2515 dev_err(hdev->dev, "Failed to copy engine-ids array from user\n"); 2516 kfree(engines); 2517 return -EFAULT; 2518 } 2519 2520 rc = hdev->asic_funcs->set_engines(hdev, engines, num_engines, command); 2521 kfree(engines); 2522 2523 return rc; 2524 } 2525 2526 static int cs_ioctl_flush_pci_hbw_writes(struct hl_fpriv *hpriv) 2527 { 2528 struct hl_device *hdev = hpriv->hdev; 2529 struct asic_fixed_properties *prop = &hdev->asic_prop; 2530 2531 if (!prop->hbw_flush_reg) { 2532 dev_dbg(hdev->dev, "HBW flush is not supported\n"); 2533 return -EOPNOTSUPP; 2534 } 2535 2536 RREG32(prop->hbw_flush_reg); 2537 2538 return 0; 2539 } 2540 2541 int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data) 2542 { 2543 union hl_cs_args *args = data; 2544 enum hl_cs_type cs_type = 0; 2545 u64 cs_seq = ULONG_MAX; 2546 void __user *chunks; 2547 u32 num_chunks, flags, timeout, 2548 signals_count = 0, sob_addr = 0, handle_id = 0; 2549 u16 sob_initial_count = 0; 2550 int rc; 2551 2552 rc = hl_cs_sanity_checks(hpriv, args); 2553 if (rc) 2554 goto out; 2555 2556 rc = hl_cs_ctx_switch(hpriv, args, &cs_seq); 2557 if (rc) 2558 goto out; 2559 2560 cs_type = hl_cs_get_cs_type(args->in.cs_flags & 2561 ~HL_CS_FLAGS_FORCE_RESTORE); 2562 chunks = (void __user *) (uintptr_t) args->in.chunks_execute; 2563 num_chunks = args->in.num_chunks_execute; 2564 flags = args->in.cs_flags; 2565 2566 /* In case this is a staged CS, user should supply the CS sequence */ 2567 if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) && 2568 !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST)) 2569 cs_seq = args->in.seq; 2570 2571 timeout = flags & HL_CS_FLAGS_CUSTOM_TIMEOUT 2572 ? 
msecs_to_jiffies(args->in.timeout * 1000) 2573 : hpriv->hdev->timeout_jiffies; 2574 2575 switch (cs_type) { 2576 case CS_TYPE_SIGNAL: 2577 case CS_TYPE_WAIT: 2578 case CS_TYPE_COLLECTIVE_WAIT: 2579 rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks, 2580 &cs_seq, args->in.cs_flags, timeout, 2581 &sob_addr, &sob_initial_count); 2582 break; 2583 case CS_RESERVE_SIGNALS: 2584 rc = cs_ioctl_reserve_signals(hpriv, 2585 args->in.encaps_signals_q_idx, 2586 args->in.encaps_signals_count, 2587 &handle_id, &sob_addr, &signals_count); 2588 break; 2589 case CS_UNRESERVE_SIGNALS: 2590 rc = cs_ioctl_unreserve_signals(hpriv, 2591 args->in.encaps_sig_handle_id); 2592 break; 2593 case CS_TYPE_ENGINE_CORE: 2594 rc = cs_ioctl_engine_cores(hpriv, args->in.engine_cores, 2595 args->in.num_engine_cores, args->in.core_command); 2596 break; 2597 case CS_TYPE_ENGINES: 2598 rc = cs_ioctl_engines(hpriv, args->in.engines, 2599 args->in.num_engines, args->in.engine_command); 2600 break; 2601 case CS_TYPE_FLUSH_PCI_HBW_WRITES: 2602 rc = cs_ioctl_flush_pci_hbw_writes(hpriv); 2603 break; 2604 default: 2605 rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq, 2606 args->in.cs_flags, 2607 args->in.encaps_sig_handle_id, 2608 timeout, &sob_initial_count); 2609 break; 2610 } 2611 out: 2612 if (rc != -EAGAIN) { 2613 memset(args, 0, sizeof(*args)); 2614 2615 switch (cs_type) { 2616 case CS_RESERVE_SIGNALS: 2617 args->out.handle_id = handle_id; 2618 args->out.sob_base_addr_offset = sob_addr; 2619 args->out.count = signals_count; 2620 break; 2621 case CS_TYPE_SIGNAL: 2622 args->out.sob_base_addr_offset = sob_addr; 2623 args->out.sob_count_before_submission = sob_initial_count; 2624 args->out.seq = cs_seq; 2625 break; 2626 case CS_TYPE_DEFAULT: 2627 args->out.sob_count_before_submission = sob_initial_count; 2628 args->out.seq = cs_seq; 2629 break; 2630 default: 2631 args->out.seq = cs_seq; 2632 break; 2633 } 2634 2635 args->out.status = rc; 2636 } 2637 2638 return rc; 2639 } 2640 2641 static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence, 2642 enum hl_cs_wait_status *status, u64 timeout_us, s64 *timestamp) 2643 { 2644 struct hl_device *hdev = ctx->hdev; 2645 ktime_t timestamp_kt; 2646 long completion_rc; 2647 int rc = 0, error; 2648 2649 if (IS_ERR(fence)) { 2650 rc = PTR_ERR(fence); 2651 if (rc == -EINVAL) 2652 dev_notice_ratelimited(hdev->dev, 2653 "Can't wait on CS %llu because current CS is at seq %llu\n", 2654 seq, ctx->cs_sequence); 2655 return rc; 2656 } 2657 2658 if (!fence) { 2659 if (!hl_pop_cs_outcome(&ctx->outcome_store, seq, ×tamp_kt, &error)) { 2660 dev_dbg(hdev->dev, 2661 "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n", 2662 seq, ctx->cs_sequence); 2663 *status = CS_WAIT_STATUS_GONE; 2664 return 0; 2665 } 2666 2667 completion_rc = 1; 2668 goto report_results; 2669 } 2670 2671 if (!timeout_us) { 2672 completion_rc = completion_done(&fence->completion); 2673 } else { 2674 unsigned long timeout; 2675 2676 timeout = (timeout_us == MAX_SCHEDULE_TIMEOUT) ? 
2677 timeout_us : usecs_to_jiffies(timeout_us); 2678 completion_rc = 2679 wait_for_completion_interruptible_timeout( 2680 &fence->completion, timeout); 2681 } 2682 2683 error = fence->error; 2684 timestamp_kt = fence->timestamp; 2685 2686 report_results: 2687 if (completion_rc > 0) { 2688 *status = CS_WAIT_STATUS_COMPLETED; 2689 if (timestamp) 2690 *timestamp = ktime_to_ns(timestamp_kt); 2691 } else { 2692 *status = CS_WAIT_STATUS_BUSY; 2693 } 2694 2695 if (completion_rc == -ERESTARTSYS) 2696 rc = completion_rc; 2697 else if (error == -ETIMEDOUT || error == -EIO) 2698 rc = error; 2699 2700 return rc; 2701 } 2702 2703 /* 2704 * hl_cs_poll_fences - iterate CS fences to check for CS completion 2705 * 2706 * @mcs_data: multi-CS internal data 2707 * @mcs_compl: multi-CS completion structure 2708 * 2709 * @return 0 on success, otherwise non 0 error code 2710 * 2711 * The function iterates on all CS sequence in the list and set bit in 2712 * completion_bitmap for each completed CS. 2713 * While iterating, the function sets the stream map of each fence in the fence 2714 * array in the completion QID stream map to be used by CSs to perform 2715 * completion to the multi-CS context. 2716 * This function shall be called after taking context ref 2717 */ 2718 static int hl_cs_poll_fences(struct multi_cs_data *mcs_data, struct multi_cs_completion *mcs_compl) 2719 { 2720 struct hl_fence **fence_ptr = mcs_data->fence_arr; 2721 struct hl_device *hdev = mcs_data->ctx->hdev; 2722 int i, rc, arr_len = mcs_data->arr_len; 2723 u64 *seq_arr = mcs_data->seq_arr; 2724 ktime_t max_ktime, first_cs_time; 2725 enum hl_cs_wait_status status; 2726 2727 memset(fence_ptr, 0, arr_len * sizeof(struct hl_fence *)); 2728 2729 /* get all fences under the same lock */ 2730 rc = hl_ctx_get_fences(mcs_data->ctx, seq_arr, fence_ptr, arr_len); 2731 if (rc) 2732 return rc; 2733 2734 /* 2735 * re-initialize the completion here to handle 2 possible cases: 2736 * 1. CS will complete the multi-CS prior clearing the completion. in which 2737 * case the fence iteration is guaranteed to catch the CS completion. 2738 * 2. the completion will occur after re-init of the completion. 2739 * in which case we will wake up immediately in wait_for_completion. 2740 */ 2741 reinit_completion(&mcs_compl->completion); 2742 2743 /* 2744 * set to maximum time to verify timestamp is valid: if at the end 2745 * this value is maintained- no timestamp was updated 2746 */ 2747 max_ktime = ktime_set(KTIME_SEC_MAX, 0); 2748 first_cs_time = max_ktime; 2749 2750 for (i = 0; i < arr_len; i++, fence_ptr++) { 2751 struct hl_fence *fence = *fence_ptr; 2752 2753 /* 2754 * In order to prevent case where we wait until timeout even though a CS associated 2755 * with the multi-CS actually completed we do things in the below order: 2756 * 1. for each fence set it's QID map in the multi-CS completion QID map. This way 2757 * any CS can, potentially, complete the multi CS for the specific QID (note 2758 * that once completion is initialized, calling complete* and then wait on the 2759 * completion will cause it to return at once) 2760 * 2. only after allowing multi-CS completion for the specific QID we check whether 2761 * the specific CS already completed (and thus the wait for completion part will 2762 * be skipped). if the CS not completed it is guaranteed that completing CS will 2763 * wake up the completion. 
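	 *
	 * Put differently: if the fence were polled before its QID was set in
	 * the map, a CS completing in that window would neither be seen by the
	 * poll nor be able to complete the multi-CS, and the caller could then
	 * sleep for the full timeout although a listed CS had already finished.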
2764 */ 2765 if (fence) 2766 mcs_compl->stream_master_qid_map |= fence->stream_master_qid_map; 2767 2768 /* 2769 * function won't sleep as it is called with timeout 0 (i.e. 2770 * poll the fence) 2771 */ 2772 rc = hl_wait_for_fence(mcs_data->ctx, seq_arr[i], fence, &status, 0, NULL); 2773 if (rc) { 2774 dev_err(hdev->dev, 2775 "wait_for_fence error :%d for CS seq %llu\n", 2776 rc, seq_arr[i]); 2777 break; 2778 } 2779 2780 switch (status) { 2781 case CS_WAIT_STATUS_BUSY: 2782 /* CS did not finished, QID to wait on already stored */ 2783 break; 2784 case CS_WAIT_STATUS_COMPLETED: 2785 /* 2786 * Using mcs_handling_done to avoid possibility of mcs_data 2787 * returns to user indicating CS completed before it finished 2788 * all of its mcs handling, to avoid race the next time the 2789 * user waits for mcs. 2790 * note: when reaching this case fence is definitely not NULL 2791 * but NULL check was added to overcome static analysis 2792 */ 2793 if (fence && !fence->mcs_handling_done) { 2794 /* 2795 * in case multi CS is completed but MCS handling not done 2796 * we "complete" the multi CS to prevent it from waiting 2797 * until time-out and the "multi-CS handling done" will have 2798 * another chance at the next iteration 2799 */ 2800 complete_all(&mcs_compl->completion); 2801 break; 2802 } 2803 2804 mcs_data->completion_bitmap |= BIT(i); 2805 /* 2806 * For all completed CSs we take the earliest timestamp. 2807 * For this we have to validate that the timestamp is 2808 * earliest of all timestamps so far. 2809 */ 2810 if (fence && mcs_data->update_ts && 2811 (ktime_compare(fence->timestamp, first_cs_time) < 0)) 2812 first_cs_time = fence->timestamp; 2813 break; 2814 case CS_WAIT_STATUS_GONE: 2815 mcs_data->update_ts = false; 2816 mcs_data->gone_cs = true; 2817 /* 2818 * It is possible to get an old sequence numbers from user 2819 * which related to already completed CSs and their fences 2820 * already gone. In this case, CS set as completed but 2821 * no need to consider its QID for mcs completion. 2822 */ 2823 mcs_data->completion_bitmap |= BIT(i); 2824 break; 2825 default: 2826 dev_err(hdev->dev, "Invalid fence status\n"); 2827 rc = -EINVAL; 2828 break; 2829 } 2830 2831 } 2832 2833 hl_fences_put(mcs_data->fence_arr, arr_len); 2834 2835 if (mcs_data->update_ts && 2836 (ktime_compare(first_cs_time, max_ktime) != 0)) 2837 mcs_data->timestamp = ktime_to_ns(first_cs_time); 2838 2839 return rc; 2840 } 2841 2842 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq, 2843 enum hl_cs_wait_status *status, s64 *timestamp) 2844 { 2845 struct hl_fence *fence; 2846 int rc = 0; 2847 2848 if (timestamp) 2849 *timestamp = 0; 2850 2851 hl_ctx_get(ctx); 2852 2853 fence = hl_ctx_get_fence(ctx, seq); 2854 2855 rc = hl_wait_for_fence(ctx, seq, fence, status, timeout_us, timestamp); 2856 hl_fence_put(fence); 2857 hl_ctx_put(ctx); 2858 2859 return rc; 2860 } 2861 2862 static inline unsigned long hl_usecs64_to_jiffies(const u64 usecs) 2863 { 2864 if (usecs <= U32_MAX) 2865 return usecs_to_jiffies(usecs); 2866 2867 /* 2868 * If the value in nanoseconds is larger than 64 bit, use the largest 2869 * 64 bit value. 
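	 * (i.e. for values above U64_MAX / NSEC_PER_USEC the multiplication
	 * below would overflow, so the conversion is clamped to
	 * nsecs_to_jiffies(U64_MAX)).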
2870 */ 2871 if (usecs >= ((u64)(U64_MAX / NSEC_PER_USEC))) 2872 return nsecs_to_jiffies(U64_MAX); 2873 2874 return nsecs_to_jiffies(usecs * NSEC_PER_USEC); 2875 } 2876 2877 /* 2878 * hl_wait_multi_cs_completion_init - init completion structure 2879 * 2880 * @hdev: pointer to habanalabs device structure 2881 * @stream_master_bitmap: stream master QIDs map, set bit indicates stream 2882 * master QID to wait on 2883 * 2884 * @return valid completion struct pointer on success, otherwise error pointer 2885 * 2886 * up to MULTI_CS_MAX_USER_CTX calls can be done concurrently to the driver. 2887 * the function gets the first available completion (by marking it "used") 2888 * and initialize its values. 2889 */ 2890 static struct multi_cs_completion *hl_wait_multi_cs_completion_init(struct hl_device *hdev) 2891 { 2892 struct multi_cs_completion *mcs_compl; 2893 int i; 2894 2895 /* find free multi_cs completion structure */ 2896 for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) { 2897 mcs_compl = &hdev->multi_cs_completion[i]; 2898 spin_lock(&mcs_compl->lock); 2899 if (!mcs_compl->used) { 2900 mcs_compl->used = 1; 2901 mcs_compl->timestamp = 0; 2902 /* 2903 * init QID map to 0 to avoid completion by CSs. the actual QID map 2904 * to multi-CS CSs will be set incrementally at a later stage 2905 */ 2906 mcs_compl->stream_master_qid_map = 0; 2907 spin_unlock(&mcs_compl->lock); 2908 break; 2909 } 2910 spin_unlock(&mcs_compl->lock); 2911 } 2912 2913 if (i == MULTI_CS_MAX_USER_CTX) { 2914 dev_err(hdev->dev, "no available multi-CS completion structure\n"); 2915 return ERR_PTR(-ENOMEM); 2916 } 2917 return mcs_compl; 2918 } 2919 2920 /* 2921 * hl_wait_multi_cs_completion_fini - return completion structure and set as 2922 * unused 2923 * 2924 * @mcs_compl: pointer to the completion structure 2925 */ 2926 static void hl_wait_multi_cs_completion_fini( 2927 struct multi_cs_completion *mcs_compl) 2928 { 2929 /* 2930 * free completion structure, do it under lock to be in-sync with the 2931 * thread that signals completion 2932 */ 2933 spin_lock(&mcs_compl->lock); 2934 mcs_compl->used = 0; 2935 spin_unlock(&mcs_compl->lock); 2936 } 2937 2938 /* 2939 * hl_wait_multi_cs_completion - wait for first CS to complete 2940 * 2941 * @mcs_data: multi-CS internal data 2942 * 2943 * @return 0 on success, otherwise non 0 error code 2944 */ 2945 static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data, 2946 struct multi_cs_completion *mcs_compl) 2947 { 2948 long completion_rc; 2949 2950 completion_rc = wait_for_completion_interruptible_timeout(&mcs_compl->completion, 2951 mcs_data->timeout_jiffies); 2952 2953 /* update timestamp */ 2954 if (completion_rc > 0) 2955 mcs_data->timestamp = mcs_compl->timestamp; 2956 2957 if (completion_rc == -ERESTARTSYS) 2958 return completion_rc; 2959 2960 mcs_data->wait_status = completion_rc; 2961 2962 return 0; 2963 } 2964 2965 /* 2966 * hl_multi_cs_completion_init - init array of multi-CS completion structures 2967 * 2968 * @hdev: pointer to habanalabs device structure 2969 */ 2970 void hl_multi_cs_completion_init(struct hl_device *hdev) 2971 { 2972 struct multi_cs_completion *mcs_cmpl; 2973 int i; 2974 2975 for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) { 2976 mcs_cmpl = &hdev->multi_cs_completion[i]; 2977 mcs_cmpl->used = 0; 2978 spin_lock_init(&mcs_cmpl->lock); 2979 init_completion(&mcs_cmpl->completion); 2980 } 2981 } 2982 2983 /* 2984 * hl_multi_cs_wait_ioctl - implementation of the multi-CS wait ioctl 2985 * 2986 * @hpriv: pointer to the private data of the fd 2987 * @data: pointer to 
multi-CS wait ioctl in/out args 2988 * 2989 */ 2990 static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) 2991 { 2992 struct multi_cs_completion *mcs_compl; 2993 struct hl_device *hdev = hpriv->hdev; 2994 struct multi_cs_data mcs_data = {}; 2995 union hl_wait_cs_args *args = data; 2996 struct hl_ctx *ctx = hpriv->ctx; 2997 struct hl_fence **fence_arr; 2998 void __user *seq_arr; 2999 u32 size_to_copy; 3000 u64 *cs_seq_arr; 3001 u8 seq_arr_len; 3002 int rc, i; 3003 3004 for (i = 0 ; i < sizeof(args->in.pad) ; i++) 3005 if (args->in.pad[i]) { 3006 dev_dbg(hdev->dev, "Padding bytes must be 0\n"); 3007 return -EINVAL; 3008 } 3009 3010 if (!hdev->supports_wait_for_multi_cs) { 3011 dev_err(hdev->dev, "Wait for multi CS is not supported\n"); 3012 return -EPERM; 3013 } 3014 3015 seq_arr_len = args->in.seq_arr_len; 3016 3017 if (seq_arr_len > HL_WAIT_MULTI_CS_LIST_MAX_LEN) { 3018 dev_err(hdev->dev, "Can wait only up to %d CSs, input sequence is of length %u\n", 3019 HL_WAIT_MULTI_CS_LIST_MAX_LEN, seq_arr_len); 3020 return -EINVAL; 3021 } 3022 3023 /* allocate memory for sequence array */ 3024 cs_seq_arr = 3025 kmalloc_array(seq_arr_len, sizeof(*cs_seq_arr), GFP_KERNEL); 3026 if (!cs_seq_arr) 3027 return -ENOMEM; 3028 3029 /* copy CS sequence array from user */ 3030 seq_arr = (void __user *) (uintptr_t) args->in.seq; 3031 size_to_copy = seq_arr_len * sizeof(*cs_seq_arr); 3032 if (copy_from_user(cs_seq_arr, seq_arr, size_to_copy)) { 3033 dev_err(hdev->dev, "Failed to copy multi-cs sequence array from user\n"); 3034 rc = -EFAULT; 3035 goto free_seq_arr; 3036 } 3037 3038 /* allocate array for the fences */ 3039 fence_arr = kmalloc_array(seq_arr_len, sizeof(struct hl_fence *), GFP_KERNEL); 3040 if (!fence_arr) { 3041 rc = -ENOMEM; 3042 goto free_seq_arr; 3043 } 3044 3045 /* initialize the multi-CS internal data */ 3046 mcs_data.ctx = ctx; 3047 mcs_data.seq_arr = cs_seq_arr; 3048 mcs_data.fence_arr = fence_arr; 3049 mcs_data.arr_len = seq_arr_len; 3050 3051 hl_ctx_get(ctx); 3052 3053 /* wait (with timeout) for the first CS to be completed */ 3054 mcs_data.timeout_jiffies = hl_usecs64_to_jiffies(args->in.timeout_us); 3055 mcs_compl = hl_wait_multi_cs_completion_init(hdev); 3056 if (IS_ERR(mcs_compl)) { 3057 rc = PTR_ERR(mcs_compl); 3058 goto put_ctx; 3059 } 3060 3061 /* poll all CS fences, extract timestamp */ 3062 mcs_data.update_ts = true; 3063 rc = hl_cs_poll_fences(&mcs_data, mcs_compl); 3064 /* 3065 * skip wait for CS completion when one of the below is true: 3066 * - an error on the poll function 3067 * - one or more CS in the list completed 3068 * - the user called ioctl with timeout 0 3069 */ 3070 if (rc || mcs_data.completion_bitmap || !args->in.timeout_us) 3071 goto completion_fini; 3072 3073 while (true) { 3074 rc = hl_wait_multi_cs_completion(&mcs_data, mcs_compl); 3075 if (rc || (mcs_data.wait_status == 0)) 3076 break; 3077 3078 /* 3079 * poll fences once again to update the CS map. 3080 * no timestamp should be updated this time. 3081 */ 3082 mcs_data.update_ts = false; 3083 rc = hl_cs_poll_fences(&mcs_data, mcs_compl); 3084 3085 if (rc || mcs_data.completion_bitmap) 3086 break; 3087 3088 /* 3089 * if hl_wait_multi_cs_completion returned before timeout (i.e. 
3090 * it got a completion) it either got completed by CS in the multi CS list 3091 * (in which case the indication will be non empty completion_bitmap) or it 3092 * got completed by CS submitted to one of the shared stream master but 3093 * not in the multi CS list (in which case we should wait again but modify 3094 * the timeout and set timestamp as zero to let a CS related to the current 3095 * multi-CS set a new, relevant, timestamp) 3096 */ 3097 mcs_data.timeout_jiffies = mcs_data.wait_status; 3098 mcs_compl->timestamp = 0; 3099 } 3100 3101 completion_fini: 3102 hl_wait_multi_cs_completion_fini(mcs_compl); 3103 3104 put_ctx: 3105 hl_ctx_put(ctx); 3106 kfree(fence_arr); 3107 3108 free_seq_arr: 3109 kfree(cs_seq_arr); 3110 3111 if (rc == -ERESTARTSYS) { 3112 dev_err_ratelimited(hdev->dev, 3113 "user process got signal while waiting for Multi-CS\n"); 3114 rc = -EINTR; 3115 } 3116 3117 if (rc) 3118 return rc; 3119 3120 /* update output args */ 3121 memset(args, 0, sizeof(*args)); 3122 3123 if (mcs_data.completion_bitmap) { 3124 args->out.status = HL_WAIT_CS_STATUS_COMPLETED; 3125 args->out.cs_completion_map = mcs_data.completion_bitmap; 3126 3127 /* if timestamp not 0- it's valid */ 3128 if (mcs_data.timestamp) { 3129 args->out.timestamp_nsec = mcs_data.timestamp; 3130 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD; 3131 } 3132 3133 /* update if some CS was gone */ 3134 if (!mcs_data.timestamp) 3135 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE; 3136 } else { 3137 args->out.status = HL_WAIT_CS_STATUS_BUSY; 3138 } 3139 3140 return 0; 3141 } 3142 3143 static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) 3144 { 3145 struct hl_device *hdev = hpriv->hdev; 3146 union hl_wait_cs_args *args = data; 3147 enum hl_cs_wait_status status; 3148 u64 seq = args->in.seq; 3149 s64 timestamp; 3150 int rc; 3151 3152 rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq, &status, ×tamp); 3153 3154 if (rc == -ERESTARTSYS) { 3155 dev_err_ratelimited(hdev->dev, 3156 "user process got signal while waiting for CS handle %llu\n", 3157 seq); 3158 return -EINTR; 3159 } 3160 3161 memset(args, 0, sizeof(*args)); 3162 3163 if (rc) { 3164 if (rc == -ETIMEDOUT) { 3165 dev_err_ratelimited(hdev->dev, 3166 "CS %llu has timed-out while user process is waiting for it\n", 3167 seq); 3168 args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT; 3169 } else if (rc == -EIO) { 3170 dev_err_ratelimited(hdev->dev, 3171 "CS %llu has been aborted while user process is waiting for it\n", 3172 seq); 3173 args->out.status = HL_WAIT_CS_STATUS_ABORTED; 3174 } 3175 return rc; 3176 } 3177 3178 if (timestamp) { 3179 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD; 3180 args->out.timestamp_nsec = timestamp; 3181 } 3182 3183 switch (status) { 3184 case CS_WAIT_STATUS_GONE: 3185 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE; 3186 fallthrough; 3187 case CS_WAIT_STATUS_COMPLETED: 3188 args->out.status = HL_WAIT_CS_STATUS_COMPLETED; 3189 break; 3190 case CS_WAIT_STATUS_BUSY: 3191 default: 3192 args->out.status = HL_WAIT_CS_STATUS_BUSY; 3193 break; 3194 } 3195 3196 return 0; 3197 } 3198 3199 static int ts_buff_get_kernel_ts_record(struct hl_mmap_mem_buf *buf, 3200 struct hl_cb *cq_cb, 3201 u64 ts_offset, u64 cq_offset, u64 target_value, 3202 spinlock_t *wait_list_lock, 3203 struct hl_user_pending_interrupt **pend) 3204 { 3205 struct hl_ts_buff *ts_buff = buf->private; 3206 struct hl_user_pending_interrupt *requested_offset_record = 3207 (struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address + 3208 ts_offset; 
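	/*
	 * The timestamp buffer is treated as a flat array of
	 * hl_user_pending_interrupt records: requested_offset_record above is
	 * the entry selected by ts_offset, while cb_last below points one past
	 * the last valid entry and is used only for the bounds check that
	 * follows.
	 */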
3209 struct hl_user_pending_interrupt *cb_last = 3210 (struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address + 3211 (ts_buff->kernel_buff_size / sizeof(struct hl_user_pending_interrupt)); 3212 unsigned long iter_counter = 0; 3213 u64 current_cq_counter; 3214 ktime_t timestamp; 3215 3216 /* Validate ts_offset not exceeding last max */ 3217 if (requested_offset_record >= cb_last) { 3218 dev_err(buf->mmg->dev, "Ts offset exceeds max CB offset(0x%llx)\n", 3219 (u64)(uintptr_t)cb_last); 3220 return -EINVAL; 3221 } 3222 3223 timestamp = ktime_get(); 3224 3225 start_over: 3226 spin_lock(wait_list_lock); 3227 3228 /* Unregister only if we didn't reach the target value 3229 * since in this case there will be no handling in irq context 3230 * and then it's safe to delete the node out of the interrupt list 3231 * then re-use it on other interrupt 3232 */ 3233 if (requested_offset_record->ts_reg_info.in_use) { 3234 current_cq_counter = *requested_offset_record->cq_kernel_addr; 3235 if (current_cq_counter < requested_offset_record->cq_target_value) { 3236 list_del(&requested_offset_record->wait_list_node); 3237 spin_unlock(wait_list_lock); 3238 3239 hl_mmap_mem_buf_put(requested_offset_record->ts_reg_info.buf); 3240 hl_cb_put(requested_offset_record->ts_reg_info.cq_cb); 3241 3242 dev_dbg(buf->mmg->dev, 3243 "ts node removed from interrupt list now can re-use\n"); 3244 } else { 3245 dev_dbg(buf->mmg->dev, 3246 "ts node in middle of irq handling\n"); 3247 3248 /* irq thread handling in the middle give it time to finish */ 3249 spin_unlock(wait_list_lock); 3250 usleep_range(100, 1000); 3251 if (++iter_counter == MAX_TS_ITER_NUM) { 3252 dev_err(buf->mmg->dev, 3253 "Timestamp offset processing reached timeout of %lld ms\n", 3254 ktime_ms_delta(ktime_get(), timestamp)); 3255 return -EAGAIN; 3256 } 3257 3258 goto start_over; 3259 } 3260 } else { 3261 /* Fill up the new registration node info */ 3262 requested_offset_record->ts_reg_info.buf = buf; 3263 requested_offset_record->ts_reg_info.cq_cb = cq_cb; 3264 requested_offset_record->ts_reg_info.timestamp_kernel_addr = 3265 (u64 *) ts_buff->user_buff_address + ts_offset; 3266 requested_offset_record->cq_kernel_addr = 3267 (u64 *) cq_cb->kernel_address + cq_offset; 3268 requested_offset_record->cq_target_value = target_value; 3269 3270 spin_unlock(wait_list_lock); 3271 } 3272 3273 *pend = requested_offset_record; 3274 3275 dev_dbg(buf->mmg->dev, "Found available node in TS kernel CB %p\n", 3276 requested_offset_record); 3277 return 0; 3278 } 3279 3280 static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, 3281 struct hl_mem_mgr *cb_mmg, struct hl_mem_mgr *mmg, 3282 u64 timeout_us, u64 cq_counters_handle, u64 cq_counters_offset, 3283 u64 target_value, struct hl_user_interrupt *interrupt, 3284 bool register_ts_record, u64 ts_handle, u64 ts_offset, 3285 u32 *status, u64 *timestamp) 3286 { 3287 struct hl_user_pending_interrupt *pend; 3288 struct hl_mmap_mem_buf *buf; 3289 struct hl_cb *cq_cb; 3290 unsigned long timeout; 3291 long completion_rc; 3292 int rc = 0; 3293 3294 timeout = hl_usecs64_to_jiffies(timeout_us); 3295 3296 hl_ctx_get(ctx); 3297 3298 cq_cb = hl_cb_get(cb_mmg, cq_counters_handle); 3299 if (!cq_cb) { 3300 rc = -EINVAL; 3301 goto put_ctx; 3302 } 3303 3304 /* Validate the cq offset */ 3305 if (((u64 *) cq_cb->kernel_address + cq_counters_offset) >= 3306 ((u64 *) cq_cb->kernel_address + (cq_cb->size / sizeof(u64)))) { 3307 rc = -EINVAL; 3308 goto put_cq_cb; 3309 } 3310 3311 if (register_ts_record) { 3312 
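		/*
		 * Registration path, in short: instead of blocking the caller,
		 * grab the user's timestamp buffer and attach a per-offset
		 * record to the interrupt's wait list. The ioctl then returns
		 * without waiting (see the HL_WAIT_CS_STATUS_COMPLETED early
		 * exit below), and the interrupt handler, outside this
		 * function, is expected to fill the user-visible timestamp
		 * once the CQ counter reaches target_value.
		 */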
dev_dbg(hdev->dev, "Timestamp registration: interrupt id: %u, ts offset: %llu, cq_offset: %llu\n", 3313 interrupt->interrupt_id, ts_offset, cq_counters_offset); 3314 buf = hl_mmap_mem_buf_get(mmg, ts_handle); 3315 if (!buf) { 3316 rc = -EINVAL; 3317 goto put_cq_cb; 3318 } 3319 3320 /* get ts buffer record */ 3321 rc = ts_buff_get_kernel_ts_record(buf, cq_cb, ts_offset, 3322 cq_counters_offset, target_value, 3323 &interrupt->wait_list_lock, &pend); 3324 if (rc) 3325 goto put_ts_buff; 3326 } else { 3327 pend = kzalloc(sizeof(*pend), GFP_KERNEL); 3328 if (!pend) { 3329 rc = -ENOMEM; 3330 goto put_cq_cb; 3331 } 3332 hl_fence_init(&pend->fence, ULONG_MAX); 3333 pend->cq_kernel_addr = (u64 *) cq_cb->kernel_address + cq_counters_offset; 3334 pend->cq_target_value = target_value; 3335 } 3336 3337 spin_lock(&interrupt->wait_list_lock); 3338 3339 /* We check the completion value here as the interrupt could have been received 3340 * before we added the node to the wait list 3341 */ 3342 if (*pend->cq_kernel_addr >= target_value) { 3343 if (register_ts_record) 3344 pend->ts_reg_info.in_use = 0; 3345 spin_unlock(&interrupt->wait_list_lock); 3346 3347 *status = HL_WAIT_CS_STATUS_COMPLETED; 3348 3349 if (register_ts_record) { 3350 *pend->ts_reg_info.timestamp_kernel_addr = ktime_get_ns(); 3351 goto put_ts_buff; 3352 } else { 3353 pend->fence.timestamp = ktime_get(); 3354 goto set_timestamp; 3355 } 3356 } else if (!timeout_us) { 3357 spin_unlock(&interrupt->wait_list_lock); 3358 *status = HL_WAIT_CS_STATUS_BUSY; 3359 pend->fence.timestamp = ktime_get(); 3360 goto set_timestamp; 3361 } 3362 3363 /* Add the pending user interrupt to the relevant list for the interrupt 3364 * handler to monitor. 3365 * Note that the list cannot be kept sorted by target value (which would 3366 * have shortened the traversal), since the same list may hold nodes that 3367 * belong to different cq counter handles. 3368 * Note: 3369 * Mark the ts buff offset as in-use here, inside the spinlock-protected 3370 * section, to avoid entering the re-use path in ts_buff_get_kernel_ts_record 3371 * before the node is added to the list. That scenario can happen when 3372 * multiple threads race on the same offset: one thread sets up the ts buff 3373 * in ts_buff_get_kernel_ts_record, then another thread takes over, reaches 3374 * ts_buff_get_kernel_ts_record as well, re-uses the same ts buff offset and 3375 * eventually tries to delete a node that was never added to 3376 * the list.
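	 *
	 * With this ordering, a thread that observes in_use == 1 is guaranteed
	 * that the record is already linked on the wait list, so the list_del()
	 * in the re-use path of ts_buff_get_kernel_ts_record always operates on
	 * a node that really was added.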
3377 */ 3378 if (register_ts_record) 3379 pend->ts_reg_info.in_use = 1; 3380 3381 list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head); 3382 spin_unlock(&interrupt->wait_list_lock); 3383 3384 if (register_ts_record) { 3385 rc = *status = HL_WAIT_CS_STATUS_COMPLETED; 3386 goto ts_registration_exit; 3387 } 3388 3389 /* Wait for interrupt handler to signal completion */ 3390 completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion, 3391 timeout); 3392 if (completion_rc > 0) { 3393 *status = HL_WAIT_CS_STATUS_COMPLETED; 3394 } else { 3395 if (completion_rc == -ERESTARTSYS) { 3396 dev_err_ratelimited(hdev->dev, 3397 "user process got signal while waiting for interrupt ID %d\n", 3398 interrupt->interrupt_id); 3399 rc = -EINTR; 3400 *status = HL_WAIT_CS_STATUS_ABORTED; 3401 } else { 3402 if (pend->fence.error == -EIO) { 3403 dev_err_ratelimited(hdev->dev, 3404 "interrupt based wait ioctl aborted(error:%d) due to a reset cycle initiated\n", 3405 pend->fence.error); 3406 rc = -EIO; 3407 *status = HL_WAIT_CS_STATUS_ABORTED; 3408 } else { 3409 /* The wait has timed-out. We don't know anything beyond that 3410 * because the workload wasn't submitted through the driver. 3411 * Therefore, from driver's perspective, the workload is still 3412 * executing. 3413 */ 3414 rc = 0; 3415 *status = HL_WAIT_CS_STATUS_BUSY; 3416 } 3417 } 3418 } 3419 3420 /* 3421 * We keep removing the node from list here, and not at the irq handler 3422 * for completion timeout case. and if it's a registration 3423 * for ts record, the node will be deleted in the irq handler after 3424 * we reach the target value. 3425 */ 3426 spin_lock(&interrupt->wait_list_lock); 3427 list_del(&pend->wait_list_node); 3428 spin_unlock(&interrupt->wait_list_lock); 3429 3430 set_timestamp: 3431 *timestamp = ktime_to_ns(pend->fence.timestamp); 3432 kfree(pend); 3433 hl_cb_put(cq_cb); 3434 ts_registration_exit: 3435 hl_ctx_put(ctx); 3436 3437 return rc; 3438 3439 put_ts_buff: 3440 hl_mmap_mem_buf_put(buf); 3441 put_cq_cb: 3442 hl_cb_put(cq_cb); 3443 put_ctx: 3444 hl_ctx_put(ctx); 3445 3446 return rc; 3447 } 3448 3449 static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_ctx *ctx, 3450 u64 timeout_us, u64 user_address, 3451 u64 target_value, struct hl_user_interrupt *interrupt, 3452 u32 *status, 3453 u64 *timestamp) 3454 { 3455 struct hl_user_pending_interrupt *pend; 3456 unsigned long timeout; 3457 u64 completion_value; 3458 long completion_rc; 3459 int rc = 0; 3460 3461 timeout = hl_usecs64_to_jiffies(timeout_us); 3462 3463 hl_ctx_get(ctx); 3464 3465 pend = kzalloc(sizeof(*pend), GFP_KERNEL); 3466 if (!pend) { 3467 hl_ctx_put(ctx); 3468 return -ENOMEM; 3469 } 3470 3471 hl_fence_init(&pend->fence, ULONG_MAX); 3472 3473 /* Add pending user interrupt to relevant list for the interrupt 3474 * handler to monitor 3475 */ 3476 spin_lock(&interrupt->wait_list_lock); 3477 list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head); 3478 spin_unlock(&interrupt->wait_list_lock); 3479 3480 /* We check for completion value as interrupt could have been received 3481 * before we added the node to the wait list 3482 */ 3483 if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) { 3484 dev_err(hdev->dev, "Failed to copy completion value from user\n"); 3485 rc = -EFAULT; 3486 goto remove_pending_user_interrupt; 3487 } 3488 3489 if (completion_value >= target_value) { 3490 *status = HL_WAIT_CS_STATUS_COMPLETED; 3491 /* There was no interrupt, we assume the completion is now. 
*/ 3492 pend->fence.timestamp = ktime_get(); 3493 } else { 3494 *status = HL_WAIT_CS_STATUS_BUSY; 3495 } 3496 3497 if (!timeout_us || (*status == HL_WAIT_CS_STATUS_COMPLETED)) 3498 goto remove_pending_user_interrupt; 3499 3500 wait_again: 3501 /* Wait for interrupt handler to signal completion */ 3502 completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion, 3503 timeout); 3504 3505 /* If timeout did not expire we need to perform the comparison. 3506 * If comparison fails, keep waiting until timeout expires 3507 */ 3508 if (completion_rc > 0) { 3509 spin_lock(&interrupt->wait_list_lock); 3510 /* reinit_completion must be called before we check for user 3511 * completion value, otherwise, if interrupt is received after 3512 * the comparison and before the next wait_for_completion, 3513 * we will reach timeout and fail 3514 */ 3515 reinit_completion(&pend->fence.completion); 3516 spin_unlock(&interrupt->wait_list_lock); 3517 3518 if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) { 3519 dev_err(hdev->dev, "Failed to copy completion value from user\n"); 3520 rc = -EFAULT; 3521 3522 goto remove_pending_user_interrupt; 3523 } 3524 3525 if (completion_value >= target_value) { 3526 *status = HL_WAIT_CS_STATUS_COMPLETED; 3527 } else if (pend->fence.error) { 3528 dev_err_ratelimited(hdev->dev, 3529 "interrupt based wait ioctl aborted(error:%d) due to a reset cycle initiated\n", 3530 pend->fence.error); 3531 /* set the command completion status as ABORTED */ 3532 *status = HL_WAIT_CS_STATUS_ABORTED; 3533 } else { 3534 timeout = completion_rc; 3535 goto wait_again; 3536 } 3537 } else if (completion_rc == -ERESTARTSYS) { 3538 dev_err_ratelimited(hdev->dev, 3539 "user process got signal while waiting for interrupt ID %d\n", 3540 interrupt->interrupt_id); 3541 rc = -EINTR; 3542 } else { 3543 /* The wait has timed-out. We don't know anything beyond that 3544 * because the workload wasn't submitted through the driver. 3545 * Therefore, from driver's perspective, the workload is still 3546 * executing. 
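	 * Hence rc is left at 0 below and HL_WAIT_CS_STATUS_BUSY is reported,
	 * rather than treating the expired timeout as an error.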
3547 */ 3548 rc = 0; 3549 *status = HL_WAIT_CS_STATUS_BUSY; 3550 } 3551 3552 remove_pending_user_interrupt: 3553 spin_lock(&interrupt->wait_list_lock); 3554 list_del(&pend->wait_list_node); 3555 spin_unlock(&interrupt->wait_list_lock); 3556 3557 *timestamp = ktime_to_ns(pend->fence.timestamp); 3558 3559 kfree(pend); 3560 hl_ctx_put(ctx); 3561 3562 return rc; 3563 } 3564 3565 static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data) 3566 { 3567 u16 interrupt_id, first_interrupt, last_interrupt; 3568 struct hl_device *hdev = hpriv->hdev; 3569 struct asic_fixed_properties *prop; 3570 struct hl_user_interrupt *interrupt; 3571 union hl_wait_cs_args *args = data; 3572 u32 status = HL_WAIT_CS_STATUS_BUSY; 3573 u64 timestamp = 0; 3574 int rc, int_idx; 3575 3576 prop = &hdev->asic_prop; 3577 3578 if (!(prop->user_interrupt_count + prop->user_dec_intr_count)) { 3579 dev_err(hdev->dev, "no user interrupts allowed"); 3580 return -EPERM; 3581 } 3582 3583 interrupt_id = FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags); 3584 3585 first_interrupt = prop->first_available_user_interrupt; 3586 last_interrupt = prop->first_available_user_interrupt + prop->user_interrupt_count - 1; 3587 3588 if (interrupt_id < prop->user_dec_intr_count) { 3589 3590 /* Check if the requested core is enabled */ 3591 if (!(prop->decoder_enabled_mask & BIT(interrupt_id))) { 3592 dev_err(hdev->dev, "interrupt on a disabled core(%u) not allowed", 3593 interrupt_id); 3594 return -EINVAL; 3595 } 3596 3597 interrupt = &hdev->user_interrupt[interrupt_id]; 3598 3599 } else if (interrupt_id >= first_interrupt && interrupt_id <= last_interrupt) { 3600 3601 int_idx = interrupt_id - first_interrupt + prop->user_dec_intr_count; 3602 interrupt = &hdev->user_interrupt[int_idx]; 3603 3604 } else if (interrupt_id == HL_COMMON_USER_CQ_INTERRUPT_ID) { 3605 interrupt = &hdev->common_user_cq_interrupt; 3606 } else if (interrupt_id == HL_COMMON_DEC_INTERRUPT_ID) { 3607 interrupt = &hdev->common_decoder_interrupt; 3608 } else { 3609 dev_err(hdev->dev, "invalid user interrupt %u", interrupt_id); 3610 return -EINVAL; 3611 } 3612 3613 if (args->in.flags & HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ) 3614 rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &hpriv->mem_mgr, &hpriv->mem_mgr, 3615 args->in.interrupt_timeout_us, args->in.cq_counters_handle, 3616 args->in.cq_counters_offset, 3617 args->in.target, interrupt, 3618 !!(args->in.flags & HL_WAIT_CS_FLAGS_REGISTER_INTERRUPT), 3619 args->in.timestamp_handle, args->in.timestamp_offset, 3620 &status, ×tamp); 3621 else 3622 rc = _hl_interrupt_wait_ioctl_user_addr(hdev, hpriv->ctx, 3623 args->in.interrupt_timeout_us, args->in.addr, 3624 args->in.target, interrupt, &status, 3625 ×tamp); 3626 if (rc) 3627 return rc; 3628 3629 memset(args, 0, sizeof(*args)); 3630 args->out.status = status; 3631 3632 if (timestamp) { 3633 args->out.timestamp_nsec = timestamp; 3634 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD; 3635 } 3636 3637 return 0; 3638 } 3639 3640 int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data) 3641 { 3642 struct hl_device *hdev = hpriv->hdev; 3643 union hl_wait_cs_args *args = data; 3644 u32 flags = args->in.flags; 3645 int rc; 3646 3647 /* If the device is not operational, or if an error has happened and user should release the 3648 * device, there is no point in waiting for any command submission or user interrupt. 
3649 */ 3650 if (!hl_device_operational(hpriv->hdev, NULL) || hdev->reset_info.watchdog_active) 3651 return -EBUSY; 3652 3653 if (flags & HL_WAIT_CS_FLAGS_INTERRUPT) 3654 rc = hl_interrupt_wait_ioctl(hpriv, data); 3655 else if (flags & HL_WAIT_CS_FLAGS_MULTI_CS) 3656 rc = hl_multi_cs_wait_ioctl(hpriv, data); 3657 else 3658 rc = hl_cs_wait_ioctl(hpriv, data); 3659 3660 return rc; 3661 } 3662
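/*
 * Rough usage sketch for the wait path above (illustrative only: the field
 * usage mirrors the handlers in this file, while the actual ioctl request
 * code and uAPI header are assumed and therefore left out):
 *
 *	union hl_wait_cs_args wait = {};
 *
 *	wait.in.seq = cs_seq;		// sequence returned by the CS ioctl
 *	wait.in.timeout_us = 1000000;	// give up after one second
 *	// leave in.flags clear for a plain single-CS wait, or set
 *	// HL_WAIT_CS_FLAGS_MULTI_CS / HL_WAIT_CS_FLAGS_INTERRUPT to take the
 *	// other branches dispatched by hl_wait_ioctl() above
 *
 *	ioctl(fd, <wait-CS ioctl request>, &wait);
 *
 *	if (wait.out.status == HL_WAIT_CS_STATUS_COMPLETED &&
 *	    (wait.out.flags & HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD))
 *		completion_ns = wait.out.timestamp_nsec;
 */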