// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2021 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"

#include <linux/uaccess.h>
#include <linux/slab.h>

#define HL_CS_FLAGS_TYPE_MASK	(HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
		HL_CS_FLAGS_COLLECTIVE_WAIT | HL_CS_FLAGS_RESERVE_SIGNALS_ONLY | \
		HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY | HL_CS_FLAGS_ENGINE_CORE_COMMAND | \
		HL_CS_FLAGS_ENGINES_COMMAND | HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)


#define MAX_TS_ITER_NUM 100

/**
 * enum hl_cs_wait_status - cs wait status
 * @CS_WAIT_STATUS_BUSY: cs was not completed yet
 * @CS_WAIT_STATUS_COMPLETED: cs completed
 * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
 */
enum hl_cs_wait_status {
	CS_WAIT_STATUS_BUSY,
	CS_WAIT_STATUS_COMPLETED,
	CS_WAIT_STATUS_GONE
};

static void job_wq_completion(struct work_struct *work);
static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq,
				enum hl_cs_wait_status *status, s64 *timestamp);
static void cs_do_release(struct kref *ref);

static void hl_push_cs_outcome(struct hl_device *hdev,
			       struct hl_cs_outcome_store *outcome_store,
			       u64 seq, ktime_t ts, int error)
{
	struct hl_cs_outcome *node;
	unsigned long flags;

	/*
	 * CS outcome store supports the following operations:
	 * push outcome - store a recent CS outcome in the store
	 * pop outcome - retrieve a SPECIFIC (by seq) CS outcome from the store
	 * It uses 2 lists: used list and free list.
	 * It has a pre-allocated amount of nodes, each node stores
	 * a single CS outcome.
	 * Initially, all the nodes are in the free list.
	 * On push outcome, a node (any) is taken from the free list, its
	 * information is filled in, and the node is moved to the used list.
	 * It is possible that there are no nodes left in the free list.
	 * In this case, we will lose some information about old outcomes. We
	 * will pop the OLDEST node from the used list, and make it free.
	 * On pop, the node is searched for in the used list (using a search
	 * index).
	 * If found, the node is then removed from the used list, and moved
	 * back to the free list. The outcome data that the node contained is
	 * returned back to the user.
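	 *
	 * As an illustration (sequence numbers arbitrary): with two nodes
	 * available, push(seq=7) and push(seq=9) consume both free nodes; a
	 * third push(seq=11) recycles the oldest used node, so the outcome of
	 * seq 7 is lost; pop(seq=9) returns that node's ts/error and moves
	 * the node back to the free list.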
64 */ 65 66 spin_lock_irqsave(&outcome_store->db_lock, flags); 67 68 if (list_empty(&outcome_store->free_list)) { 69 node = list_last_entry(&outcome_store->used_list, 70 struct hl_cs_outcome, list_link); 71 hash_del(&node->map_link); 72 dev_dbg(hdev->dev, "CS %llu outcome was lost\n", node->seq); 73 } else { 74 node = list_last_entry(&outcome_store->free_list, 75 struct hl_cs_outcome, list_link); 76 } 77 78 list_del_init(&node->list_link); 79 80 node->seq = seq; 81 node->ts = ts; 82 node->error = error; 83 84 list_add(&node->list_link, &outcome_store->used_list); 85 hash_add(outcome_store->outcome_map, &node->map_link, node->seq); 86 87 spin_unlock_irqrestore(&outcome_store->db_lock, flags); 88 } 89 90 static bool hl_pop_cs_outcome(struct hl_cs_outcome_store *outcome_store, 91 u64 seq, ktime_t *ts, int *error) 92 { 93 struct hl_cs_outcome *node; 94 unsigned long flags; 95 96 spin_lock_irqsave(&outcome_store->db_lock, flags); 97 98 hash_for_each_possible(outcome_store->outcome_map, node, map_link, seq) 99 if (node->seq == seq) { 100 *ts = node->ts; 101 *error = node->error; 102 103 hash_del(&node->map_link); 104 list_del_init(&node->list_link); 105 list_add(&node->list_link, &outcome_store->free_list); 106 107 spin_unlock_irqrestore(&outcome_store->db_lock, flags); 108 109 return true; 110 } 111 112 spin_unlock_irqrestore(&outcome_store->db_lock, flags); 113 114 return false; 115 } 116 117 static void hl_sob_reset(struct kref *ref) 118 { 119 struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob, 120 kref); 121 struct hl_device *hdev = hw_sob->hdev; 122 123 dev_dbg(hdev->dev, "reset sob id %u\n", hw_sob->sob_id); 124 125 hdev->asic_funcs->reset_sob(hdev, hw_sob); 126 127 hw_sob->need_reset = false; 128 } 129 130 void hl_sob_reset_error(struct kref *ref) 131 { 132 struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob, 133 kref); 134 struct hl_device *hdev = hw_sob->hdev; 135 136 dev_crit(hdev->dev, 137 "SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n", 138 hw_sob->q_idx, hw_sob->sob_id); 139 } 140 141 void hw_sob_put(struct hl_hw_sob *hw_sob) 142 { 143 if (hw_sob) 144 kref_put(&hw_sob->kref, hl_sob_reset); 145 } 146 147 static void hw_sob_put_err(struct hl_hw_sob *hw_sob) 148 { 149 if (hw_sob) 150 kref_put(&hw_sob->kref, hl_sob_reset_error); 151 } 152 153 void hw_sob_get(struct hl_hw_sob *hw_sob) 154 { 155 if (hw_sob) 156 kref_get(&hw_sob->kref); 157 } 158 159 /** 160 * hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet 161 * @sob_base: sob base id 162 * @sob_mask: sob user mask, each bit represents a sob offset from sob base 163 * @mask: generated mask 164 * 165 * Return: 0 if given parameters are valid 166 */ 167 int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask) 168 { 169 int i; 170 171 if (sob_mask == 0) 172 return -EINVAL; 173 174 if (sob_mask == 0x1) { 175 *mask = ~(1 << (sob_base & 0x7)); 176 } else { 177 /* find msb in order to verify sob range is valid */ 178 for (i = BITS_PER_BYTE - 1 ; i >= 0 ; i--) 179 if (BIT(i) & sob_mask) 180 break; 181 182 if (i > (HL_MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1)) 183 return -EINVAL; 184 185 *mask = ~sob_mask; 186 } 187 188 return 0; 189 } 190 191 static void hl_fence_release(struct kref *kref) 192 { 193 struct hl_fence *fence = 194 container_of(kref, struct hl_fence, refcount); 195 struct hl_cs_compl *hl_cs_cmpl = 196 container_of(fence, struct hl_cs_compl, base_fence); 197 198 kfree(hl_cs_cmpl); 199 } 200 201 void hl_fence_put(struct hl_fence *fence) 202 { 203 if 
(IS_ERR_OR_NULL(fence)) 204 return; 205 kref_put(&fence->refcount, hl_fence_release); 206 } 207 208 void hl_fences_put(struct hl_fence **fence, int len) 209 { 210 int i; 211 212 for (i = 0; i < len; i++, fence++) 213 hl_fence_put(*fence); 214 } 215 216 void hl_fence_get(struct hl_fence *fence) 217 { 218 if (fence) 219 kref_get(&fence->refcount); 220 } 221 222 static void hl_fence_init(struct hl_fence *fence, u64 sequence) 223 { 224 kref_init(&fence->refcount); 225 fence->cs_sequence = sequence; 226 fence->error = 0; 227 fence->timestamp = ktime_set(0, 0); 228 fence->mcs_handling_done = false; 229 init_completion(&fence->completion); 230 } 231 232 void cs_get(struct hl_cs *cs) 233 { 234 kref_get(&cs->refcount); 235 } 236 237 static int cs_get_unless_zero(struct hl_cs *cs) 238 { 239 return kref_get_unless_zero(&cs->refcount); 240 } 241 242 static void cs_put(struct hl_cs *cs) 243 { 244 kref_put(&cs->refcount, cs_do_release); 245 } 246 247 static void cs_job_do_release(struct kref *ref) 248 { 249 struct hl_cs_job *job = container_of(ref, struct hl_cs_job, refcount); 250 251 kfree(job); 252 } 253 254 static void hl_cs_job_put(struct hl_cs_job *job) 255 { 256 kref_put(&job->refcount, cs_job_do_release); 257 } 258 259 bool cs_needs_completion(struct hl_cs *cs) 260 { 261 /* In case this is a staged CS, only the last CS in sequence should 262 * get a completion, any non staged CS will always get a completion 263 */ 264 if (cs->staged_cs && !cs->staged_last) 265 return false; 266 267 return true; 268 } 269 270 bool cs_needs_timeout(struct hl_cs *cs) 271 { 272 /* In case this is a staged CS, only the first CS in sequence should 273 * get a timeout, any non staged CS will always get a timeout 274 */ 275 if (cs->staged_cs && !cs->staged_first) 276 return false; 277 278 return true; 279 } 280 281 static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job) 282 { 283 /* 284 * Patched CB is created for external queues jobs, and for H/W queues 285 * jobs if the user CB was allocated by driver and MMU is disabled. 286 */ 287 return (job->queue_type == QUEUE_TYPE_EXT || 288 (job->queue_type == QUEUE_TYPE_HW && 289 job->is_kernel_allocated_cb && 290 !hdev->mmu_enable)); 291 } 292 293 /* 294 * cs_parser - parse the user command submission 295 * 296 * @hpriv : pointer to the private data of the fd 297 * @job : pointer to the job that holds the command submission info 298 * 299 * The function parses the command submission of the user. 
It calls the 300 * ASIC specific parser, which returns a list of memory blocks to send 301 * to the device as different command buffers 302 * 303 */ 304 static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job) 305 { 306 struct hl_device *hdev = hpriv->hdev; 307 struct hl_cs_parser parser; 308 int rc; 309 310 parser.ctx_id = job->cs->ctx->asid; 311 parser.cs_sequence = job->cs->sequence; 312 parser.job_id = job->id; 313 314 parser.hw_queue_id = job->hw_queue_id; 315 parser.job_userptr_list = &job->userptr_list; 316 parser.patched_cb = NULL; 317 parser.user_cb = job->user_cb; 318 parser.user_cb_size = job->user_cb_size; 319 parser.queue_type = job->queue_type; 320 parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb; 321 job->patched_cb = NULL; 322 parser.completion = cs_needs_completion(job->cs); 323 324 rc = hdev->asic_funcs->cs_parser(hdev, &parser); 325 326 if (is_cb_patched(hdev, job)) { 327 if (!rc) { 328 job->patched_cb = parser.patched_cb; 329 job->job_cb_size = parser.patched_cb_size; 330 job->contains_dma_pkt = parser.contains_dma_pkt; 331 atomic_inc(&job->patched_cb->cs_cnt); 332 } 333 334 /* 335 * Whether the parsing worked or not, we don't need the 336 * original CB anymore because it was already parsed and 337 * won't be accessed again for this CS 338 */ 339 atomic_dec(&job->user_cb->cs_cnt); 340 hl_cb_put(job->user_cb); 341 job->user_cb = NULL; 342 } else if (!rc) { 343 job->job_cb_size = job->user_cb_size; 344 } 345 346 return rc; 347 } 348 349 static void hl_complete_job(struct hl_device *hdev, struct hl_cs_job *job) 350 { 351 struct hl_cs *cs = job->cs; 352 353 if (is_cb_patched(hdev, job)) { 354 hl_userptr_delete_list(hdev, &job->userptr_list); 355 356 /* 357 * We might arrive here from rollback and patched CB wasn't 358 * created, so we need to check it's not NULL 359 */ 360 if (job->patched_cb) { 361 atomic_dec(&job->patched_cb->cs_cnt); 362 hl_cb_put(job->patched_cb); 363 } 364 } 365 366 /* For H/W queue jobs, if a user CB was allocated by driver and MMU is 367 * enabled, the user CB isn't released in cs_parser() and thus should be 368 * released here. This is also true for INT queues jobs which were 369 * allocated by driver. 370 */ 371 if ((job->is_kernel_allocated_cb && 372 ((job->queue_type == QUEUE_TYPE_HW && hdev->mmu_enable) || 373 job->queue_type == QUEUE_TYPE_INT))) { 374 atomic_dec(&job->user_cb->cs_cnt); 375 hl_cb_put(job->user_cb); 376 } 377 378 /* 379 * This is the only place where there can be multiple threads 380 * modifying the list at the same time 381 */ 382 spin_lock(&cs->job_lock); 383 list_del(&job->cs_node); 384 spin_unlock(&cs->job_lock); 385 386 hl_debugfs_remove_job(hdev, job); 387 388 /* We decrement reference only for a CS that gets completion 389 * because the reference was incremented only for this kind of CS 390 * right before it was scheduled. 391 * 392 * In staged submission, only the last CS marked as 'staged_last' 393 * gets completion, hence its release function will be called from here. 394 * As for all the rest CS's in the staged submission which do not get 395 * completion, their CS reference will be decremented by the 396 * 'staged_last' CS during the CS release flow. 397 * All relevant PQ CI counters will be incremented during the CS release 398 * flow by calling 'hl_hw_queue_update_ci'. 
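	 * For example, in a staged submission CS_first..CS_last, only the
	 * 'staged_last' CS takes this extra reference and is put here when it
	 * completes; its release flow then puts the references that
	 * staged_cs_get() took for the other members of the submission.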
 */
	if (cs_needs_completion(cs) &&
			(job->queue_type == QUEUE_TYPE_EXT || job->queue_type == QUEUE_TYPE_HW)) {

		/* In CS based completions, the timestamp is already available,
		 * so no need to extract it from job
		 */
		if (hdev->asic_prop.completion_mode == HL_COMPLETION_MODE_JOB)
			cs->completion_timestamp = job->timestamp;

		cs_put(cs);
	}

	hl_cs_job_put(job);
}

/*
 * hl_staged_cs_find_first - locate the first CS in this staged submission
 *
 * @hdev: pointer to device structure
 * @cs_seq: staged submission sequence number
 *
 * @note: This function must be called under 'hdev->cs_mirror_lock'
 *
 * Find and return a CS pointer with the given sequence
 */
struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq)
{
	struct hl_cs *cs;

	list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node)
		if (cs->staged_cs && cs->staged_first &&
				cs->sequence == cs_seq)
			return cs;

	return NULL;
}

/*
 * is_staged_cs_last_exists - returns true if the last CS in sequence exists
 *
 * @hdev: pointer to device structure
 * @cs: staged submission member
 *
 */
bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs)
{
	struct hl_cs *last_entry;

	last_entry = list_last_entry(&cs->staged_cs_node, struct hl_cs,
							staged_cs_node);

	if (last_entry->staged_last)
		return true;

	return false;
}

/*
 * staged_cs_get - get CS reference if this CS is a part of a staged CS
 *
 * @hdev: pointer to device structure
 * @cs: current CS
 *
 * Increment CS reference for every CS in this staged submission except for
 * the CS which gets a completion.
 */
static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs)
{
	/* Only the last CS in this staged submission will get a completion.
	 * We must increment the reference for all other CS's in this
	 * staged submission.
	 * Once we get a completion we will release the whole staged submission.
	 */
	if (!cs->staged_last)
		cs_get(cs);
}

/*
 * staged_cs_put - put a CS in case it is part of staged submission
 *
 * @hdev: pointer to device structure
 * @cs: CS to put
 *
 * This function decrements a CS reference (for a non completion CS)
 */
static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
{
	/* We release all CS's in a staged submission except the last
	 * CS which we have never incremented its reference.
	 */
	if (!cs_needs_completion(cs))
		cs_put(cs);
}

static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
{
	struct hl_cs *next = NULL, *iter, *first_cs;

	if (!cs_needs_timeout(cs))
		return;

	spin_lock(&hdev->cs_mirror_lock);

	/* We need to handle tdr only once for the complete staged submission.
	 * Hence, we choose the CS that reaches this function first which is
	 * the CS marked as 'staged_last'.
	 * In case a single staged cs was submitted which has both first and last
	 * indications, then "cs_find_first" below will return NULL, since we
	 * removed the cs node from the list before getting here,
	 * in such cases just continue with the cs to cancel its TDR work.
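	 * Note that the TDR work itself is armed only on a CS for which
	 * cs_needs_timeout() returns true, i.e. on the 'staged_first' CS of
	 * a staged submission, so cancelling it once covers the entire
	 * submission.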
	 */
	if (cs->staged_cs && cs->staged_last) {
		first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
		if (first_cs)
			cs = first_cs;
	}

	spin_unlock(&hdev->cs_mirror_lock);

	/* Don't cancel TDR in case this CS was timedout because we might be
	 * running from the TDR context
	 */
	if (cs->timedout || hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT)
		return;

	if (cs->tdr_active)
		cancel_delayed_work_sync(&cs->work_tdr);

	spin_lock(&hdev->cs_mirror_lock);

	/* queue TDR for next CS */
	list_for_each_entry(iter, &hdev->cs_mirror_list, mirror_node)
		if (cs_needs_timeout(iter)) {
			next = iter;
			break;
		}

	if (next && !next->tdr_active) {
		next->tdr_active = true;
		schedule_delayed_work(&next->work_tdr, next->timeout_jiffies);
	}

	spin_unlock(&hdev->cs_mirror_lock);
}

/*
 * force_complete_multi_cs - complete all contexts that wait on multi-CS
 *
 * @hdev: pointer to habanalabs device structure
 */
static void force_complete_multi_cs(struct hl_device *hdev)
{
	int i;

	for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
		struct multi_cs_completion *mcs_compl;

		mcs_compl = &hdev->multi_cs_completion[i];

		spin_lock(&mcs_compl->lock);

		if (!mcs_compl->used) {
			spin_unlock(&mcs_compl->lock);
			continue;
		}

		/* when calling force complete no context should be waiting on
		 * multi-CS.
		 * We call the function as a protection for such a case, to
		 * free any pending context and print an error message
		 */
		dev_err(hdev->dev,
			"multi-CS completion context %d still waiting when calling force completion\n",
			i);
		complete_all(&mcs_compl->completion);
		spin_unlock(&mcs_compl->lock);
	}
}

/*
 * complete_multi_cs - complete all waiting entities on multi-CS
 *
 * @hdev: pointer to habanalabs device structure
 * @cs: CS structure
 * The function signals a waiting entity that has overlapping stream masters
 * with the completed CS.
 * For example:
 * - a completed CS worked on stream master QID 4, multi CS completion
 *   is actively waiting on stream master QIDs 3, 5. don't send signal as no
 *   common stream master QID
 * - a completed CS worked on stream master QID 4, multi CS completion
 *   is actively waiting on stream master QIDs 3, 4. send signal as stream
 *   master QID 4 is common
 */
static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs)
{
	struct hl_fence *fence = cs->fence;
	int i;

	/* in case of a staged CS, check for completion only for the first CS */
	if (cs->staged_cs && !cs->staged_first)
		return;

	for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
		struct multi_cs_completion *mcs_compl;

		mcs_compl = &hdev->multi_cs_completion[i];
		if (!mcs_compl->used)
			continue;

		spin_lock(&mcs_compl->lock);

		/*
		 * complete if:
		 * 1. still waiting for completion
		 * 2. the completed CS has at least one overlapping stream
		 *    master with the stream masters in the completion
		 */
		if (mcs_compl->used &&
				(fence->stream_master_qid_map &
					mcs_compl->stream_master_qid_map)) {
			/* extract the timestamp only of the first completed CS */
			if (!mcs_compl->timestamp)
				mcs_compl->timestamp = ktime_to_ns(fence->timestamp);

			complete_all(&mcs_compl->completion);

			/*
			 * Setting mcs_handling_done inside the lock ensures
			 * at least one fence has mcs_handling_done set to
			 * true before the wait for mcs finishes. This ensures
			 * at least one CS will be set as completed when
			 * polling mcs fences.
			 */
			fence->mcs_handling_done = true;
		}

		spin_unlock(&mcs_compl->lock);
	}
	/* In case CS completed without mcs completion initialized */
	fence->mcs_handling_done = true;
}

static inline void cs_release_sob_reset_handler(struct hl_device *hdev,
					struct hl_cs *cs,
					struct hl_cs_compl *hl_cs_cmpl)
{
	/* Skip this handler if the cs wasn't submitted, to avoid putting
	 * the hw_sob twice, since this case is already handled at this point,
	 * also skip if the hw_sob pointer wasn't set.
	 */
	if (!hl_cs_cmpl->hw_sob || !cs->submitted)
		return;

	spin_lock(&hl_cs_cmpl->lock);

	/*
	 * we get refcount upon reservation of signals or signal/wait cs for the
	 * hw_sob object, and need to put it when the first staged cs
	 * (which contains the encaps signals) or cs signal/wait is completed.
	 */
	if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
			(hl_cs_cmpl->type == CS_TYPE_WAIT) ||
			(hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT) ||
			(!!hl_cs_cmpl->encaps_signals)) {
		dev_dbg(hdev->dev,
			"CS 0x%llx type %d finished, sob_id: %d, sob_val: %u\n",
			hl_cs_cmpl->cs_seq,
			hl_cs_cmpl->type,
			hl_cs_cmpl->hw_sob->sob_id,
			hl_cs_cmpl->sob_val);

		hw_sob_put(hl_cs_cmpl->hw_sob);

		if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
			hdev->asic_funcs->reset_sob_group(hdev,
					hl_cs_cmpl->sob_group);
	}

	spin_unlock(&hl_cs_cmpl->lock);
}

static void cs_do_release(struct kref *ref)
{
	struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
	struct hl_device *hdev = cs->ctx->hdev;
	struct hl_cs_job *job, *tmp;
	struct hl_cs_compl *hl_cs_cmpl =
			container_of(cs->fence, struct hl_cs_compl, base_fence);

	cs->completed = true;

	/*
	 * Although reaching here means that all external jobs have finished
	 * (because each one of them took a refcount on the CS), we still
	 * need to go over the internal jobs and complete them. Otherwise, we
	 * will have leaked memory and what's worse, the CS object (and
	 * potentially the CTX object) could be released, while the JOB
	 * still holds a pointer to them (but no reference).
	 */
	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		hl_complete_job(hdev, job);

	if (!cs->submitted) {
		/*
		 * In case the wait for signal CS was submitted, the fence put
		 * occurs in init_signal_wait_cs() or collective_wait_init_cs()
		 * right before hanging on the PQ.
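		 * For a wait CS that was never submitted that put did not
		 * happen, so the reference on the signal fence is dropped
		 * here instead.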
		 */
		if (cs->type == CS_TYPE_WAIT ||
				cs->type == CS_TYPE_COLLECTIVE_WAIT)
			hl_fence_put(cs->signal_fence);

		goto out;
	}

	/* Need to update CI for all queue jobs that do not get a completion */
	hl_hw_queue_update_ci(cs);

	/* remove CS from CS mirror list */
	spin_lock(&hdev->cs_mirror_lock);
	list_del_init(&cs->mirror_node);
	spin_unlock(&hdev->cs_mirror_lock);

	cs_handle_tdr(hdev, cs);

	if (cs->staged_cs) {
		/* the completion CS decrements reference for the entire
		 * staged submission
		 */
		if (cs->staged_last) {
			struct hl_cs *staged_cs, *tmp_cs;

			list_for_each_entry_safe(staged_cs, tmp_cs,
					&cs->staged_cs_node, staged_cs_node)
				staged_cs_put(hdev, staged_cs);
		}

		/* A staged CS will be a member in the list only after it
		 * was submitted. We used 'cs_mirror_lock' when inserting
		 * it to the list so we will use it again when removing it
		 */
		if (cs->submitted) {
			spin_lock(&hdev->cs_mirror_lock);
			list_del(&cs->staged_cs_node);
			spin_unlock(&hdev->cs_mirror_lock);
		}

		/* decrement refcount to handle when first staged cs
		 * with encaps signals is completed.
		 */
		if (hl_cs_cmpl->encaps_signals)
			kref_put(&hl_cs_cmpl->encaps_sig_hdl->refcount,
					hl_encaps_release_handle_and_put_ctx);
	}

	if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT) && cs->encaps_signals)
		kref_put(&cs->encaps_sig_hdl->refcount, hl_encaps_release_handle_and_put_ctx);

out:
	/* Must be called before hl_ctx_put because inside we use ctx to get
	 * the device
	 */
	hl_debugfs_remove_cs(cs);

	hdev->shadow_cs_queue[cs->sequence & (hdev->asic_prop.max_pending_cs - 1)] = NULL;

	/* We need to mark an error for not submitted because in that case
	 * the hl fence release flow is different.
Mainly, we don't need 770 * to handle hw_sob for signal/wait 771 */ 772 if (cs->timedout) 773 cs->fence->error = -ETIMEDOUT; 774 else if (cs->aborted) 775 cs->fence->error = -EIO; 776 else if (!cs->submitted) 777 cs->fence->error = -EBUSY; 778 779 if (unlikely(cs->skip_reset_on_timeout)) { 780 dev_err(hdev->dev, 781 "Command submission %llu completed after %llu (s)\n", 782 cs->sequence, 783 div_u64(jiffies - cs->submission_time_jiffies, HZ)); 784 } 785 786 if (cs->timestamp) { 787 cs->fence->timestamp = cs->completion_timestamp; 788 hl_push_cs_outcome(hdev, &cs->ctx->outcome_store, cs->sequence, 789 cs->fence->timestamp, cs->fence->error); 790 } 791 792 hl_ctx_put(cs->ctx); 793 794 complete_all(&cs->fence->completion); 795 complete_multi_cs(hdev, cs); 796 797 cs_release_sob_reset_handler(hdev, cs, hl_cs_cmpl); 798 799 hl_fence_put(cs->fence); 800 801 kfree(cs->jobs_in_queue_cnt); 802 kfree(cs); 803 } 804 805 static void cs_timedout(struct work_struct *work) 806 { 807 struct hl_device *hdev; 808 u64 event_mask = 0x0; 809 int rc; 810 struct hl_cs *cs = container_of(work, struct hl_cs, 811 work_tdr.work); 812 bool skip_reset_on_timeout = cs->skip_reset_on_timeout, device_reset = false; 813 814 rc = cs_get_unless_zero(cs); 815 if (!rc) 816 return; 817 818 if ((!cs->submitted) || (cs->completed)) { 819 cs_put(cs); 820 return; 821 } 822 823 hdev = cs->ctx->hdev; 824 825 if (likely(!skip_reset_on_timeout)) { 826 if (hdev->reset_on_lockup) 827 device_reset = true; 828 else 829 hdev->reset_info.needs_reset = true; 830 831 /* Mark the CS is timed out so we won't try to cancel its TDR */ 832 cs->timedout = true; 833 } 834 835 /* Save only the first CS timeout parameters */ 836 rc = atomic_cmpxchg(&hdev->captured_err_info.cs_timeout.write_enable, 1, 0); 837 if (rc) { 838 hdev->captured_err_info.cs_timeout.timestamp = ktime_get(); 839 hdev->captured_err_info.cs_timeout.seq = cs->sequence; 840 event_mask |= HL_NOTIFIER_EVENT_CS_TIMEOUT; 841 } 842 843 switch (cs->type) { 844 case CS_TYPE_SIGNAL: 845 dev_err(hdev->dev, 846 "Signal command submission %llu has not finished in time!\n", 847 cs->sequence); 848 break; 849 850 case CS_TYPE_WAIT: 851 dev_err(hdev->dev, 852 "Wait command submission %llu has not finished in time!\n", 853 cs->sequence); 854 break; 855 856 case CS_TYPE_COLLECTIVE_WAIT: 857 dev_err(hdev->dev, 858 "Collective Wait command submission %llu has not finished in time!\n", 859 cs->sequence); 860 break; 861 862 default: 863 dev_err(hdev->dev, 864 "Command submission %llu has not finished in time!\n", 865 cs->sequence); 866 break; 867 } 868 869 rc = hl_state_dump(hdev); 870 if (rc) 871 dev_err(hdev->dev, "Error during system state dump %d\n", rc); 872 873 cs_put(cs); 874 875 if (device_reset) { 876 event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET; 877 hl_device_cond_reset(hdev, HL_DRV_RESET_TDR, event_mask); 878 } else if (event_mask) { 879 hl_notifier_event_send_all(hdev, event_mask); 880 } 881 } 882 883 static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx, 884 enum hl_cs_type cs_type, u64 user_sequence, 885 struct hl_cs **cs_new, u32 flags, u32 timeout) 886 { 887 struct hl_cs_counters_atomic *cntr; 888 struct hl_fence *other = NULL; 889 struct hl_cs_compl *cs_cmpl; 890 struct hl_cs *cs; 891 int rc; 892 893 cntr = &hdev->aggregated_cs_counters; 894 895 cs = kzalloc(sizeof(*cs), GFP_ATOMIC); 896 if (!cs) 897 cs = kzalloc(sizeof(*cs), GFP_KERNEL); 898 899 if (!cs) { 900 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); 901 atomic64_inc(&cntr->out_of_mem_drop_cnt); 902 return 
-ENOMEM; 903 } 904 905 /* increment refcnt for context */ 906 hl_ctx_get(ctx); 907 908 cs->ctx = ctx; 909 cs->submitted = false; 910 cs->completed = false; 911 cs->type = cs_type; 912 cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP); 913 cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS); 914 cs->timeout_jiffies = timeout; 915 cs->skip_reset_on_timeout = 916 hdev->reset_info.skip_reset_on_timeout || 917 !!(flags & HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT); 918 cs->submission_time_jiffies = jiffies; 919 INIT_LIST_HEAD(&cs->job_list); 920 INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout); 921 kref_init(&cs->refcount); 922 spin_lock_init(&cs->job_lock); 923 924 cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_ATOMIC); 925 if (!cs_cmpl) 926 cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_KERNEL); 927 928 if (!cs_cmpl) { 929 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); 930 atomic64_inc(&cntr->out_of_mem_drop_cnt); 931 rc = -ENOMEM; 932 goto free_cs; 933 } 934 935 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues, 936 sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC); 937 if (!cs->jobs_in_queue_cnt) 938 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues, 939 sizeof(*cs->jobs_in_queue_cnt), GFP_KERNEL); 940 941 if (!cs->jobs_in_queue_cnt) { 942 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); 943 atomic64_inc(&cntr->out_of_mem_drop_cnt); 944 rc = -ENOMEM; 945 goto free_cs_cmpl; 946 } 947 948 cs_cmpl->hdev = hdev; 949 cs_cmpl->type = cs->type; 950 spin_lock_init(&cs_cmpl->lock); 951 cs->fence = &cs_cmpl->base_fence; 952 953 spin_lock(&ctx->cs_lock); 954 955 cs_cmpl->cs_seq = ctx->cs_sequence; 956 other = ctx->cs_pending[cs_cmpl->cs_seq & 957 (hdev->asic_prop.max_pending_cs - 1)]; 958 959 if (other && !completion_done(&other->completion)) { 960 /* If the following statement is true, it means we have reached 961 * a point in which only part of the staged submission was 962 * submitted and we don't have enough room in the 'cs_pending' 963 * array for the rest of the submission. 964 * This causes a deadlock because this CS will never be 965 * completed as it depends on future CS's for completion. 966 */ 967 if (other->cs_sequence == user_sequence) 968 dev_crit_ratelimited(hdev->dev, 969 "Staged CS %llu deadlock due to lack of resources", 970 user_sequence); 971 972 dev_dbg_ratelimited(hdev->dev, 973 "Rejecting CS because of too many in-flights CS\n"); 974 atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt); 975 atomic64_inc(&cntr->max_cs_in_flight_drop_cnt); 976 rc = -EAGAIN; 977 goto free_fence; 978 } 979 980 /* init hl_fence */ 981 hl_fence_init(&cs_cmpl->base_fence, cs_cmpl->cs_seq); 982 983 cs->sequence = cs_cmpl->cs_seq; 984 985 ctx->cs_pending[cs_cmpl->cs_seq & 986 (hdev->asic_prop.max_pending_cs - 1)] = 987 &cs_cmpl->base_fence; 988 ctx->cs_sequence++; 989 990 hl_fence_get(&cs_cmpl->base_fence); 991 992 hl_fence_put(other); 993 994 spin_unlock(&ctx->cs_lock); 995 996 *cs_new = cs; 997 998 return 0; 999 1000 free_fence: 1001 spin_unlock(&ctx->cs_lock); 1002 kfree(cs->jobs_in_queue_cnt); 1003 free_cs_cmpl: 1004 kfree(cs_cmpl); 1005 free_cs: 1006 kfree(cs); 1007 hl_ctx_put(ctx); 1008 return rc; 1009 } 1010 1011 static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs) 1012 { 1013 struct hl_cs_job *job, *tmp; 1014 1015 staged_cs_put(hdev, cs); 1016 1017 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) 1018 hl_complete_job(hdev, job); 1019 } 1020 1021 /* 1022 * release_reserved_encaps_signals() - release reserved encapsulated signals. 
 * @hdev: pointer to habanalabs device structure
 *
 * Release reserved encapsulated signals which weren't un-reserved, or for which a CS with
 * encapsulated signals wasn't submitted and thus weren't released as part of CS roll-back.
 * For these signals we also need to put the refcount of the H/W SOB which was taken at the
 * reservation.
 */
static void release_reserved_encaps_signals(struct hl_device *hdev)
{
	struct hl_ctx *ctx = hl_get_compute_ctx(hdev);
	struct hl_cs_encaps_sig_handle *handle;
	struct hl_encaps_signals_mgr *mgr;
	u32 id;

	if (!ctx)
		return;

	mgr = &ctx->sig_mgr;

	idr_for_each_entry(&mgr->handles, handle, id)
		if (handle->cs_seq == ULLONG_MAX)
			kref_put(&handle->refcount, hl_encaps_release_handle_and_put_sob_ctx);

	hl_ctx_put(ctx);
}

void hl_cs_rollback_all(struct hl_device *hdev, bool skip_wq_flush)
{
	int i;
	struct hl_cs *cs, *tmp;

	if (!skip_wq_flush) {
		flush_workqueue(hdev->ts_free_obj_wq);

		/* flush all completions before iterating over the CS mirror list in
		 * order to avoid a race with the release functions
		 */
		for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
			flush_workqueue(hdev->cq_wq[i]);

		flush_workqueue(hdev->cs_cmplt_wq);
	}

	/* Make sure we don't have leftovers in the CS mirror list */
	list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
		cs_get(cs);
		cs->aborted = true;
		dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
					cs->ctx->asid, cs->sequence);
		cs_rollback(hdev, cs);
		cs_put(cs);
	}

	force_complete_multi_cs(hdev);

	release_reserved_encaps_signals(hdev);
}

static void
wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
{
	struct hl_user_pending_interrupt *pend, *temp;

	spin_lock(&interrupt->wait_list_lock);
	list_for_each_entry_safe(pend, temp, &interrupt->wait_list_head, wait_list_node) {
		if (pend->ts_reg_info.buf) {
			list_del(&pend->wait_list_node);
			hl_mmap_mem_buf_put(pend->ts_reg_info.buf);
			hl_cb_put(pend->ts_reg_info.cq_cb);
		} else {
			pend->fence.error = -EIO;
			complete_all(&pend->fence.completion);
		}
	}
	spin_unlock(&interrupt->wait_list_lock);
}

void hl_release_pending_user_interrupts(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_user_interrupt *interrupt;
	int i;

	if (!prop->user_interrupt_count)
		return;

	/* We iterate through the user interrupt requests and wake up all
	 * user threads waiting for interrupt completion. We iterate the
	 * list under a lock, which is why all user threads, once awake,
	 * will wait on the same lock and will release the waiting object upon
	 * unlock.
1114 */ 1115 1116 for (i = 0 ; i < prop->user_interrupt_count ; i++) { 1117 interrupt = &hdev->user_interrupt[i]; 1118 wake_pending_user_interrupt_threads(interrupt); 1119 } 1120 1121 interrupt = &hdev->common_user_cq_interrupt; 1122 wake_pending_user_interrupt_threads(interrupt); 1123 1124 interrupt = &hdev->common_decoder_interrupt; 1125 wake_pending_user_interrupt_threads(interrupt); 1126 } 1127 1128 static void force_complete_cs(struct hl_device *hdev) 1129 { 1130 struct hl_cs *cs; 1131 1132 spin_lock(&hdev->cs_mirror_lock); 1133 1134 list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node) { 1135 cs->fence->error = -EIO; 1136 complete_all(&cs->fence->completion); 1137 } 1138 1139 spin_unlock(&hdev->cs_mirror_lock); 1140 } 1141 1142 void hl_abort_waiting_for_cs_completions(struct hl_device *hdev) 1143 { 1144 force_complete_cs(hdev); 1145 force_complete_multi_cs(hdev); 1146 } 1147 1148 static void job_wq_completion(struct work_struct *work) 1149 { 1150 struct hl_cs_job *job = container_of(work, struct hl_cs_job, 1151 finish_work); 1152 struct hl_cs *cs = job->cs; 1153 struct hl_device *hdev = cs->ctx->hdev; 1154 1155 /* job is no longer needed */ 1156 hl_complete_job(hdev, job); 1157 } 1158 1159 static void cs_completion(struct work_struct *work) 1160 { 1161 struct hl_cs *cs = container_of(work, struct hl_cs, finish_work); 1162 struct hl_device *hdev = cs->ctx->hdev; 1163 struct hl_cs_job *job, *tmp; 1164 1165 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) 1166 hl_complete_job(hdev, job); 1167 } 1168 1169 u32 hl_get_active_cs_num(struct hl_device *hdev) 1170 { 1171 u32 active_cs_num = 0; 1172 struct hl_cs *cs; 1173 1174 spin_lock(&hdev->cs_mirror_lock); 1175 1176 list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node) 1177 if (!cs->completed) 1178 active_cs_num++; 1179 1180 spin_unlock(&hdev->cs_mirror_lock); 1181 1182 return active_cs_num; 1183 } 1184 1185 static int validate_queue_index(struct hl_device *hdev, 1186 struct hl_cs_chunk *chunk, 1187 enum hl_queue_type *queue_type, 1188 bool *is_kernel_allocated_cb) 1189 { 1190 struct asic_fixed_properties *asic = &hdev->asic_prop; 1191 struct hw_queue_properties *hw_queue_prop; 1192 1193 /* This must be checked here to prevent out-of-bounds access to 1194 * hw_queues_props array 1195 */ 1196 if (chunk->queue_index >= asic->max_queues) { 1197 dev_err(hdev->dev, "Queue index %d is invalid\n", 1198 chunk->queue_index); 1199 return -EINVAL; 1200 } 1201 1202 hw_queue_prop = &asic->hw_queues_props[chunk->queue_index]; 1203 1204 if (hw_queue_prop->type == QUEUE_TYPE_NA) { 1205 dev_err(hdev->dev, "Queue index %d is not applicable\n", 1206 chunk->queue_index); 1207 return -EINVAL; 1208 } 1209 1210 if (hw_queue_prop->binned) { 1211 dev_err(hdev->dev, "Queue index %d is binned out\n", 1212 chunk->queue_index); 1213 return -EINVAL; 1214 } 1215 1216 if (hw_queue_prop->driver_only) { 1217 dev_err(hdev->dev, 1218 "Queue index %d is restricted for the kernel driver\n", 1219 chunk->queue_index); 1220 return -EINVAL; 1221 } 1222 1223 /* When hw queue type isn't QUEUE_TYPE_HW, 1224 * USER_ALLOC_CB flag shall be referred as "don't care". 
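	 * In that case the CB origin is derived from the queue's
	 * cb_alloc_flags alone, i.e. a kernel-allocated CB is used whenever
	 * CB_ALLOC_KERNEL is set for the queue.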
1225 */ 1226 if (hw_queue_prop->type == QUEUE_TYPE_HW) { 1227 if (chunk->cs_chunk_flags & HL_CS_CHUNK_FLAGS_USER_ALLOC_CB) { 1228 if (!(hw_queue_prop->cb_alloc_flags & CB_ALLOC_USER)) { 1229 dev_err(hdev->dev, 1230 "Queue index %d doesn't support user CB\n", 1231 chunk->queue_index); 1232 return -EINVAL; 1233 } 1234 1235 *is_kernel_allocated_cb = false; 1236 } else { 1237 if (!(hw_queue_prop->cb_alloc_flags & 1238 CB_ALLOC_KERNEL)) { 1239 dev_err(hdev->dev, 1240 "Queue index %d doesn't support kernel CB\n", 1241 chunk->queue_index); 1242 return -EINVAL; 1243 } 1244 1245 *is_kernel_allocated_cb = true; 1246 } 1247 } else { 1248 *is_kernel_allocated_cb = !!(hw_queue_prop->cb_alloc_flags 1249 & CB_ALLOC_KERNEL); 1250 } 1251 1252 *queue_type = hw_queue_prop->type; 1253 return 0; 1254 } 1255 1256 static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev, 1257 struct hl_mem_mgr *mmg, 1258 struct hl_cs_chunk *chunk) 1259 { 1260 struct hl_cb *cb; 1261 1262 cb = hl_cb_get(mmg, chunk->cb_handle); 1263 if (!cb) { 1264 dev_err(hdev->dev, "CB handle 0x%llx invalid\n", chunk->cb_handle); 1265 return NULL; 1266 } 1267 1268 if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) { 1269 dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size); 1270 goto release_cb; 1271 } 1272 1273 atomic_inc(&cb->cs_cnt); 1274 1275 return cb; 1276 1277 release_cb: 1278 hl_cb_put(cb); 1279 return NULL; 1280 } 1281 1282 struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, 1283 enum hl_queue_type queue_type, bool is_kernel_allocated_cb) 1284 { 1285 struct hl_cs_job *job; 1286 1287 job = kzalloc(sizeof(*job), GFP_ATOMIC); 1288 if (!job) 1289 job = kzalloc(sizeof(*job), GFP_KERNEL); 1290 1291 if (!job) 1292 return NULL; 1293 1294 kref_init(&job->refcount); 1295 job->queue_type = queue_type; 1296 job->is_kernel_allocated_cb = is_kernel_allocated_cb; 1297 1298 if (is_cb_patched(hdev, job)) 1299 INIT_LIST_HEAD(&job->userptr_list); 1300 1301 if (job->queue_type == QUEUE_TYPE_EXT) 1302 INIT_WORK(&job->finish_work, job_wq_completion); 1303 1304 return job; 1305 } 1306 1307 static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags) 1308 { 1309 if (cs_type_flags & HL_CS_FLAGS_SIGNAL) 1310 return CS_TYPE_SIGNAL; 1311 else if (cs_type_flags & HL_CS_FLAGS_WAIT) 1312 return CS_TYPE_WAIT; 1313 else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT) 1314 return CS_TYPE_COLLECTIVE_WAIT; 1315 else if (cs_type_flags & HL_CS_FLAGS_RESERVE_SIGNALS_ONLY) 1316 return CS_RESERVE_SIGNALS; 1317 else if (cs_type_flags & HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY) 1318 return CS_UNRESERVE_SIGNALS; 1319 else if (cs_type_flags & HL_CS_FLAGS_ENGINE_CORE_COMMAND) 1320 return CS_TYPE_ENGINE_CORE; 1321 else if (cs_type_flags & HL_CS_FLAGS_ENGINES_COMMAND) 1322 return CS_TYPE_ENGINES; 1323 else if (cs_type_flags & HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES) 1324 return CS_TYPE_FLUSH_PCI_HBW_WRITES; 1325 else 1326 return CS_TYPE_DEFAULT; 1327 } 1328 1329 static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args) 1330 { 1331 struct hl_device *hdev = hpriv->hdev; 1332 struct hl_ctx *ctx = hpriv->ctx; 1333 u32 cs_type_flags, num_chunks; 1334 enum hl_device_status status; 1335 enum hl_cs_type cs_type; 1336 bool is_sync_stream; 1337 int i; 1338 1339 for (i = 0 ; i < sizeof(args->in.pad) ; i++) 1340 if (args->in.pad[i]) { 1341 dev_dbg(hdev->dev, "Padding bytes must be 0\n"); 1342 return -EINVAL; 1343 } 1344 1345 if (!hl_device_operational(hdev, &status)) { 1346 return -EBUSY; 1347 } 1348 1349 if ((args->in.cs_flags & HL_CS_FLAGS_STAGED_SUBMISSION) 
&& 1350 !hdev->supports_staged_submission) { 1351 dev_err(hdev->dev, "staged submission not supported"); 1352 return -EPERM; 1353 } 1354 1355 cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK; 1356 1357 if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) { 1358 dev_err(hdev->dev, 1359 "CS type flags are mutually exclusive, context %d\n", 1360 ctx->asid); 1361 return -EINVAL; 1362 } 1363 1364 cs_type = hl_cs_get_cs_type(cs_type_flags); 1365 num_chunks = args->in.num_chunks_execute; 1366 1367 is_sync_stream = (cs_type == CS_TYPE_SIGNAL || cs_type == CS_TYPE_WAIT || 1368 cs_type == CS_TYPE_COLLECTIVE_WAIT); 1369 1370 if (unlikely(is_sync_stream && !hdev->supports_sync_stream)) { 1371 dev_err(hdev->dev, "Sync stream CS is not supported\n"); 1372 return -EINVAL; 1373 } 1374 1375 if (cs_type == CS_TYPE_DEFAULT) { 1376 if (!num_chunks) { 1377 dev_err(hdev->dev, "Got execute CS with 0 chunks, context %d\n", ctx->asid); 1378 return -EINVAL; 1379 } 1380 } else if (is_sync_stream && num_chunks != 1) { 1381 dev_err(hdev->dev, 1382 "Sync stream CS mandates one chunk only, context %d\n", 1383 ctx->asid); 1384 return -EINVAL; 1385 } 1386 1387 return 0; 1388 } 1389 1390 static int hl_cs_copy_chunk_array(struct hl_device *hdev, 1391 struct hl_cs_chunk **cs_chunk_array, 1392 void __user *chunks, u32 num_chunks, 1393 struct hl_ctx *ctx) 1394 { 1395 u32 size_to_copy; 1396 1397 if (num_chunks > HL_MAX_JOBS_PER_CS) { 1398 atomic64_inc(&ctx->cs_counters.validation_drop_cnt); 1399 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt); 1400 dev_err(hdev->dev, 1401 "Number of chunks can NOT be larger than %d\n", 1402 HL_MAX_JOBS_PER_CS); 1403 return -EINVAL; 1404 } 1405 1406 *cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array), 1407 GFP_ATOMIC); 1408 if (!*cs_chunk_array) 1409 *cs_chunk_array = kmalloc_array(num_chunks, 1410 sizeof(**cs_chunk_array), GFP_KERNEL); 1411 if (!*cs_chunk_array) { 1412 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); 1413 atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt); 1414 return -ENOMEM; 1415 } 1416 1417 size_to_copy = num_chunks * sizeof(struct hl_cs_chunk); 1418 if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) { 1419 atomic64_inc(&ctx->cs_counters.validation_drop_cnt); 1420 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt); 1421 dev_err(hdev->dev, "Failed to copy cs chunk array from user\n"); 1422 kfree(*cs_chunk_array); 1423 return -EFAULT; 1424 } 1425 1426 return 0; 1427 } 1428 1429 static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs, 1430 u64 sequence, u32 flags, 1431 u32 encaps_signal_handle) 1432 { 1433 if (!(flags & HL_CS_FLAGS_STAGED_SUBMISSION)) 1434 return 0; 1435 1436 cs->staged_last = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_LAST); 1437 cs->staged_first = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST); 1438 1439 if (cs->staged_first) { 1440 /* Staged CS sequence is the first CS sequence */ 1441 INIT_LIST_HEAD(&cs->staged_cs_node); 1442 cs->staged_sequence = cs->sequence; 1443 1444 if (cs->encaps_signals) 1445 cs->encaps_sig_hdl_id = encaps_signal_handle; 1446 } else { 1447 /* User sequence will be validated in 'hl_hw_queue_schedule_cs' 1448 * under the cs_mirror_lock 1449 */ 1450 cs->staged_sequence = sequence; 1451 } 1452 1453 /* Increment CS reference if needed */ 1454 staged_cs_get(hdev, cs); 1455 1456 cs->staged_cs = true; 1457 1458 return 0; 1459 } 1460 1461 static u32 get_stream_master_qid_mask(struct hl_device *hdev, u32 qid) 1462 { 1463 int i; 1464 1465 
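	/* Each stream master QID owns one bit in the per-CS qid map that is
	 * later matched against multi-CS waiters; return that bit, or 0 if
	 * the given QID is not a stream master.
	 */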
for (i = 0; i < hdev->stream_master_qid_arr_size; i++) 1466 if (qid == hdev->stream_master_qid_arr[i]) 1467 return BIT(i); 1468 1469 return 0; 1470 } 1471 1472 static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, 1473 u32 num_chunks, u64 *cs_seq, u32 flags, 1474 u32 encaps_signals_handle, u32 timeout, 1475 u16 *signal_initial_sob_count) 1476 { 1477 bool staged_mid, int_queues_only = true, using_hw_queues = false; 1478 struct hl_device *hdev = hpriv->hdev; 1479 struct hl_cs_chunk *cs_chunk_array; 1480 struct hl_cs_counters_atomic *cntr; 1481 struct hl_ctx *ctx = hpriv->ctx; 1482 struct hl_cs_job *job; 1483 struct hl_cs *cs; 1484 struct hl_cb *cb; 1485 u64 user_sequence; 1486 u8 stream_master_qid_map = 0; 1487 int rc, i; 1488 1489 cntr = &hdev->aggregated_cs_counters; 1490 user_sequence = *cs_seq; 1491 *cs_seq = ULLONG_MAX; 1492 1493 rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks, 1494 hpriv->ctx); 1495 if (rc) 1496 goto out; 1497 1498 if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) && 1499 !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST)) 1500 staged_mid = true; 1501 else 1502 staged_mid = false; 1503 1504 rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT, 1505 staged_mid ? user_sequence : ULLONG_MAX, &cs, flags, 1506 timeout); 1507 if (rc) 1508 goto free_cs_chunk_array; 1509 1510 *cs_seq = cs->sequence; 1511 1512 hl_debugfs_add_cs(cs); 1513 1514 rc = cs_staged_submission(hdev, cs, user_sequence, flags, 1515 encaps_signals_handle); 1516 if (rc) 1517 goto free_cs_object; 1518 1519 /* If this is a staged submission we must return the staged sequence 1520 * rather than the internal CS sequence 1521 */ 1522 if (cs->staged_cs) 1523 *cs_seq = cs->staged_sequence; 1524 1525 /* Validate ALL the CS chunks before submitting the CS */ 1526 for (i = 0 ; i < num_chunks ; i++) { 1527 struct hl_cs_chunk *chunk = &cs_chunk_array[i]; 1528 enum hl_queue_type queue_type; 1529 bool is_kernel_allocated_cb; 1530 1531 rc = validate_queue_index(hdev, chunk, &queue_type, 1532 &is_kernel_allocated_cb); 1533 if (rc) { 1534 atomic64_inc(&ctx->cs_counters.validation_drop_cnt); 1535 atomic64_inc(&cntr->validation_drop_cnt); 1536 goto free_cs_object; 1537 } 1538 1539 if (is_kernel_allocated_cb) { 1540 cb = get_cb_from_cs_chunk(hdev, &hpriv->mem_mgr, chunk); 1541 if (!cb) { 1542 atomic64_inc( 1543 &ctx->cs_counters.validation_drop_cnt); 1544 atomic64_inc(&cntr->validation_drop_cnt); 1545 rc = -EINVAL; 1546 goto free_cs_object; 1547 } 1548 } else { 1549 cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle; 1550 } 1551 1552 if (queue_type == QUEUE_TYPE_EXT || 1553 queue_type == QUEUE_TYPE_HW) { 1554 int_queues_only = false; 1555 1556 /* 1557 * store which stream are being used for external/HW 1558 * queues of this CS 1559 */ 1560 if (hdev->supports_wait_for_multi_cs) 1561 stream_master_qid_map |= 1562 get_stream_master_qid_mask(hdev, 1563 chunk->queue_index); 1564 } 1565 1566 if (queue_type == QUEUE_TYPE_HW) 1567 using_hw_queues = true; 1568 1569 job = hl_cs_allocate_job(hdev, queue_type, 1570 is_kernel_allocated_cb); 1571 if (!job) { 1572 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); 1573 atomic64_inc(&cntr->out_of_mem_drop_cnt); 1574 dev_err(hdev->dev, "Failed to allocate a new job\n"); 1575 rc = -ENOMEM; 1576 if (is_kernel_allocated_cb) 1577 goto release_cb; 1578 1579 goto free_cs_object; 1580 } 1581 1582 job->id = i + 1; 1583 job->cs = cs; 1584 job->user_cb = cb; 1585 job->user_cb_size = chunk->cb_size; 1586 job->hw_queue_id = chunk->queue_index; 1587 1588 
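		/* Track how many jobs this CS adds to each queue and in
		 * total; these counters are used when the CS is scheduled to
		 * the H/W queues and when updating the CI on release.
		 */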
cs->jobs_in_queue_cnt[job->hw_queue_id]++; 1589 cs->jobs_cnt++; 1590 1591 list_add_tail(&job->cs_node, &cs->job_list); 1592 1593 /* 1594 * Increment CS reference. When CS reference is 0, CS is 1595 * done and can be signaled to user and free all its resources 1596 * Only increment for JOB on external or H/W queues, because 1597 * only for those JOBs we get completion 1598 */ 1599 if (cs_needs_completion(cs) && 1600 (job->queue_type == QUEUE_TYPE_EXT || 1601 job->queue_type == QUEUE_TYPE_HW)) 1602 cs_get(cs); 1603 1604 hl_debugfs_add_job(hdev, job); 1605 1606 rc = cs_parser(hpriv, job); 1607 if (rc) { 1608 atomic64_inc(&ctx->cs_counters.parsing_drop_cnt); 1609 atomic64_inc(&cntr->parsing_drop_cnt); 1610 dev_err(hdev->dev, 1611 "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n", 1612 cs->ctx->asid, cs->sequence, job->id, rc); 1613 goto free_cs_object; 1614 } 1615 } 1616 1617 /* We allow a CS with any queue type combination as long as it does 1618 * not get a completion 1619 */ 1620 if (int_queues_only && cs_needs_completion(cs)) { 1621 atomic64_inc(&ctx->cs_counters.validation_drop_cnt); 1622 atomic64_inc(&cntr->validation_drop_cnt); 1623 dev_err(hdev->dev, 1624 "Reject CS %d.%llu since it contains only internal queues jobs and needs completion\n", 1625 cs->ctx->asid, cs->sequence); 1626 rc = -EINVAL; 1627 goto free_cs_object; 1628 } 1629 1630 if (using_hw_queues) 1631 INIT_WORK(&cs->finish_work, cs_completion); 1632 1633 /* 1634 * store the (external/HW queues) streams used by the CS in the 1635 * fence object for multi-CS completion 1636 */ 1637 if (hdev->supports_wait_for_multi_cs) 1638 cs->fence->stream_master_qid_map = stream_master_qid_map; 1639 1640 rc = hl_hw_queue_schedule_cs(cs); 1641 if (rc) { 1642 if (rc != -EAGAIN) 1643 dev_err(hdev->dev, 1644 "Failed to submit CS %d.%llu to H/W queues, error %d\n", 1645 cs->ctx->asid, cs->sequence, rc); 1646 goto free_cs_object; 1647 } 1648 1649 *signal_initial_sob_count = cs->initial_sob_count; 1650 1651 rc = HL_CS_STATUS_SUCCESS; 1652 goto put_cs; 1653 1654 release_cb: 1655 atomic_dec(&cb->cs_cnt); 1656 hl_cb_put(cb); 1657 free_cs_object: 1658 cs_rollback(hdev, cs); 1659 *cs_seq = ULLONG_MAX; 1660 /* The path below is both for good and erroneous exits */ 1661 put_cs: 1662 /* We finished with the CS in this function, so put the ref */ 1663 cs_put(cs); 1664 free_cs_chunk_array: 1665 kfree(cs_chunk_array); 1666 out: 1667 return rc; 1668 } 1669 1670 static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args, 1671 u64 *cs_seq) 1672 { 1673 struct hl_device *hdev = hpriv->hdev; 1674 struct hl_ctx *ctx = hpriv->ctx; 1675 bool need_soft_reset = false; 1676 int rc = 0, do_ctx_switch = 0; 1677 void __user *chunks; 1678 u32 num_chunks, tmp; 1679 u16 sob_count; 1680 int ret; 1681 1682 if (hdev->supports_ctx_switch) 1683 do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0); 1684 1685 if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) { 1686 mutex_lock(&hpriv->restore_phase_mutex); 1687 1688 if (do_ctx_switch) { 1689 rc = hdev->asic_funcs->context_switch(hdev, ctx->asid); 1690 if (rc) { 1691 dev_err_ratelimited(hdev->dev, 1692 "Failed to switch to context %d, rejecting CS! %d\n", 1693 ctx->asid, rc); 1694 /* 1695 * If we timedout, or if the device is not IDLE 1696 * while we want to do context-switch (-EBUSY), 1697 * we need to soft-reset because QMAN is 1698 * probably stuck. 
However, we can't call
				 * reset here directly because of deadlock, so
				 * we need to do it at the very end of this
				 * function
				 */
				if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
					need_soft_reset = true;
				mutex_unlock(&hpriv->restore_phase_mutex);
				goto out;
			}
		}

		hdev->asic_funcs->restore_phase_topology(hdev);

		chunks = (void __user *) (uintptr_t) args->in.chunks_restore;
		num_chunks = args->in.num_chunks_restore;

		if (!num_chunks) {
			dev_dbg(hdev->dev,
				"Need to run restore phase but restore CS is empty\n");
			rc = 0;
		} else {
			rc = cs_ioctl_default(hpriv, chunks, num_chunks,
					cs_seq, 0, 0, hdev->timeout_jiffies, &sob_count);
		}

		mutex_unlock(&hpriv->restore_phase_mutex);

		if (rc) {
			dev_err(hdev->dev,
				"Failed to submit restore CS for context %d (%d)\n",
				ctx->asid, rc);
			goto out;
		}

		/* Need to wait for restore completion before execution phase */
		if (num_chunks) {
			enum hl_cs_wait_status status;
wait_again:
			ret = _hl_cs_wait_ioctl(hdev, ctx,
					jiffies_to_usecs(hdev->timeout_jiffies),
					*cs_seq, &status, NULL);
			if (ret) {
				if (ret == -ERESTARTSYS) {
					usleep_range(100, 200);
					goto wait_again;
				}

				dev_err(hdev->dev,
					"Restore CS for context %d failed to complete %d\n",
					ctx->asid, ret);
				rc = -ENOEXEC;
				goto out;
			}
		}

		if (hdev->supports_ctx_switch)
			ctx->thread_ctx_switch_wait_token = 1;

	} else if (hdev->supports_ctx_switch && !ctx->thread_ctx_switch_wait_token) {
		rc = hl_poll_timeout_memory(hdev,
			&ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
			100, jiffies_to_usecs(hdev->timeout_jiffies), false);

		if (rc == -ETIMEDOUT) {
			dev_err(hdev->dev,
				"context switch phase timeout (%d)\n", tmp);
			goto out;
		}
	}

out:
	if ((rc == -ETIMEDOUT || rc == -EBUSY) && (need_soft_reset))
		hl_device_reset(hdev, 0);

	return rc;
}

/*
 * hl_cs_signal_sob_wraparound_handler: handle SOB value wraparound case.
 * If the SOB value reaches the max value, move to the other SOB reserved
 * for the queue.
 * @hdev: pointer to device structure
 * @q_idx: stream queue index
 * @hw_sob: the H/W SOB used in this signal CS.
 * @count: signals count
 * @encaps_sig: tells whether it's reservation for encaps signals or not.
 *
 * Note that this function must be called while hw_queues_lock is taken.
 */
int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
			struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig)
{
	struct hl_sync_stream_properties *prop;
	struct hl_hw_sob *sob = *hw_sob, *other_sob;
	u8 other_sob_offset;

	prop = &hdev->kernel_queues[q_idx].sync_stream_prop;

	hw_sob_get(sob);

	/* check for wraparound */
	if (prop->next_sob_val + count >= HL_MAX_SOB_VAL) {
		/*
		 * Decrement as we reached the max value.
		 * The release function won't be called here as we've
		 * just incremented the refcount right before calling this
		 * function.
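		 * For example, if next_sob_val is close enough to
		 * HL_MAX_SOB_VAL that adding 'count' would overflow it, the
		 * stream moves to the spare SOB and restarts the count from
		 * 'count' ('count' + 1 for an encaps reservation).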
1807 */ 1808 hw_sob_put_err(sob); 1809 1810 /* 1811 * check the other sob value, if it still in use then fail 1812 * otherwise make the switch 1813 */ 1814 other_sob_offset = (prop->curr_sob_offset + 1) % HL_RSVD_SOBS; 1815 other_sob = &prop->hw_sob[other_sob_offset]; 1816 1817 if (kref_read(&other_sob->kref) != 1) { 1818 dev_err(hdev->dev, "error: Cannot switch SOBs q_idx: %d\n", 1819 q_idx); 1820 return -EINVAL; 1821 } 1822 1823 /* 1824 * next_sob_val always points to the next available signal 1825 * in the sob, so in encaps signals it will be the next one 1826 * after reserving the required amount. 1827 */ 1828 if (encaps_sig) 1829 prop->next_sob_val = count + 1; 1830 else 1831 prop->next_sob_val = count; 1832 1833 /* only two SOBs are currently in use */ 1834 prop->curr_sob_offset = other_sob_offset; 1835 *hw_sob = other_sob; 1836 1837 /* 1838 * check if other_sob needs reset, then do it before using it 1839 * for the reservation or the next signal cs. 1840 * we do it here, and for both encaps and regular signal cs 1841 * cases in order to avoid possible races of two kref_put 1842 * of the sob which can occur at the same time if we move the 1843 * sob reset(kref_put) to cs_do_release function. 1844 * in addition, if we have combination of cs signal and 1845 * encaps, and at the point we need to reset the sob there was 1846 * no more reservations and only signal cs keep coming, 1847 * in such case we need signal_cs to put the refcount and 1848 * reset the sob. 1849 */ 1850 if (other_sob->need_reset) 1851 hw_sob_put(other_sob); 1852 1853 if (encaps_sig) { 1854 /* set reset indication for the sob */ 1855 sob->need_reset = true; 1856 hw_sob_get(other_sob); 1857 } 1858 1859 dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n", 1860 prop->curr_sob_offset, q_idx); 1861 } else { 1862 prop->next_sob_val += count; 1863 } 1864 1865 return 0; 1866 } 1867 1868 static int cs_ioctl_extract_signal_seq(struct hl_device *hdev, 1869 struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx, 1870 bool encaps_signals) 1871 { 1872 u64 *signal_seq_arr = NULL; 1873 u32 size_to_copy, signal_seq_arr_len; 1874 int rc = 0; 1875 1876 if (encaps_signals) { 1877 *signal_seq = chunk->encaps_signal_seq; 1878 return 0; 1879 } 1880 1881 signal_seq_arr_len = chunk->num_signal_seq_arr; 1882 1883 /* currently only one signal seq is supported */ 1884 if (signal_seq_arr_len != 1) { 1885 atomic64_inc(&ctx->cs_counters.validation_drop_cnt); 1886 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt); 1887 dev_err(hdev->dev, 1888 "Wait for signal CS supports only one signal CS seq\n"); 1889 return -EINVAL; 1890 } 1891 1892 signal_seq_arr = kmalloc_array(signal_seq_arr_len, 1893 sizeof(*signal_seq_arr), 1894 GFP_ATOMIC); 1895 if (!signal_seq_arr) 1896 signal_seq_arr = kmalloc_array(signal_seq_arr_len, 1897 sizeof(*signal_seq_arr), 1898 GFP_KERNEL); 1899 if (!signal_seq_arr) { 1900 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); 1901 atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt); 1902 return -ENOMEM; 1903 } 1904 1905 size_to_copy = signal_seq_arr_len * sizeof(*signal_seq_arr); 1906 if (copy_from_user(signal_seq_arr, 1907 u64_to_user_ptr(chunk->signal_seq_arr), 1908 size_to_copy)) { 1909 atomic64_inc(&ctx->cs_counters.validation_drop_cnt); 1910 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt); 1911 dev_err(hdev->dev, 1912 "Failed to copy signal seq array from user\n"); 1913 rc = -EFAULT; 1914 goto out; 1915 } 1916 1917 /* currently it is guaranteed to have only one signal seq */ 
1918 *signal_seq = signal_seq_arr[0]; 1919 1920 out: 1921 kfree(signal_seq_arr); 1922 1923 return rc; 1924 } 1925 1926 static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev, 1927 struct hl_ctx *ctx, struct hl_cs *cs, 1928 enum hl_queue_type q_type, u32 q_idx, u32 encaps_signal_offset) 1929 { 1930 struct hl_cs_counters_atomic *cntr; 1931 struct hl_cs_job *job; 1932 struct hl_cb *cb; 1933 u32 cb_size; 1934 1935 cntr = &hdev->aggregated_cs_counters; 1936 1937 job = hl_cs_allocate_job(hdev, q_type, true); 1938 if (!job) { 1939 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); 1940 atomic64_inc(&cntr->out_of_mem_drop_cnt); 1941 dev_err(hdev->dev, "Failed to allocate a new job\n"); 1942 return -ENOMEM; 1943 } 1944 1945 if (cs->type == CS_TYPE_WAIT) 1946 cb_size = hdev->asic_funcs->get_wait_cb_size(hdev); 1947 else 1948 cb_size = hdev->asic_funcs->get_signal_cb_size(hdev); 1949 1950 cb = hl_cb_kernel_create(hdev, cb_size, 1951 q_type == QUEUE_TYPE_HW && hdev->mmu_enable); 1952 if (!cb) { 1953 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); 1954 atomic64_inc(&cntr->out_of_mem_drop_cnt); 1955 kfree(job); 1956 return -EFAULT; 1957 } 1958 1959 job->id = 0; 1960 job->cs = cs; 1961 job->user_cb = cb; 1962 atomic_inc(&job->user_cb->cs_cnt); 1963 job->user_cb_size = cb_size; 1964 job->hw_queue_id = q_idx; 1965 1966 if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT) 1967 && cs->encaps_signals) 1968 job->encaps_sig_wait_offset = encaps_signal_offset; 1969 /* 1970 * No need in parsing, user CB is the patched CB. 1971 * We call hl_cb_destroy() out of two reasons - we don't need the CB in 1972 * the CB idr anymore and to decrement its refcount as it was 1973 * incremented inside hl_cb_kernel_create(). 1974 */ 1975 job->patched_cb = job->user_cb; 1976 job->job_cb_size = job->user_cb_size; 1977 hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle); 1978 1979 /* increment refcount as for external queues we get completion */ 1980 cs_get(cs); 1981 1982 cs->jobs_in_queue_cnt[job->hw_queue_id]++; 1983 cs->jobs_cnt++; 1984 1985 list_add_tail(&job->cs_node, &cs->job_list); 1986 1987 hl_debugfs_add_job(hdev, job); 1988 1989 return 0; 1990 } 1991 1992 static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv, 1993 u32 q_idx, u32 count, 1994 u32 *handle_id, u32 *sob_addr, 1995 u32 *signals_count) 1996 { 1997 struct hw_queue_properties *hw_queue_prop; 1998 struct hl_sync_stream_properties *prop; 1999 struct hl_device *hdev = hpriv->hdev; 2000 struct hl_cs_encaps_sig_handle *handle; 2001 struct hl_encaps_signals_mgr *mgr; 2002 struct hl_hw_sob *hw_sob; 2003 int hdl_id; 2004 int rc = 0; 2005 2006 if (count >= HL_MAX_SOB_VAL) { 2007 dev_err(hdev->dev, "signals count(%u) exceeds the max SOB value\n", 2008 count); 2009 rc = -EINVAL; 2010 goto out; 2011 } 2012 2013 if (q_idx >= hdev->asic_prop.max_queues) { 2014 dev_err(hdev->dev, "Queue index %d is invalid\n", 2015 q_idx); 2016 rc = -EINVAL; 2017 goto out; 2018 } 2019 2020 hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx]; 2021 2022 if (!hw_queue_prop->supports_sync_stream) { 2023 dev_err(hdev->dev, 2024 "Queue index %d does not support sync stream operations\n", 2025 q_idx); 2026 rc = -EINVAL; 2027 goto out; 2028 } 2029 2030 prop = &hdev->kernel_queues[q_idx].sync_stream_prop; 2031 2032 handle = kzalloc(sizeof(*handle), GFP_KERNEL); 2033 if (!handle) { 2034 rc = -ENOMEM; 2035 goto out; 2036 } 2037 2038 handle->count = count; 2039 2040 hl_ctx_get(hpriv->ctx); 2041 handle->ctx = hpriv->ctx; 2042 mgr = &hpriv->ctx->sig_mgr; 2043 
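	/*
	 * Illustrative example of the reservation bookkeeping done below (the
	 * numbers are made up): if the queue's next_sob_val is currently 100
	 * and the user reserves count = 10 signals, the wraparound handler
	 * advances next_sob_val to 110 (or moves to the other reserved SOB and
	 * restarts the count when the current SOB cannot hold 10 more signals),
	 * pre_sob_val is stored as 110 - 10 = 100, and *signals_count returned
	 * to the user is 110. A later unreserve is accepted only while
	 * pre_sob_val + count still equals next_sob_val, i.e. no other signal
	 * submission advanced the SOB in the meantime. Wait CSs that use this
	 * handle pass an offset in the range [0, count] relative to the
	 * reservation.
	 */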
2044 spin_lock(&mgr->lock); 2045 hdl_id = idr_alloc(&mgr->handles, handle, 1, 0, GFP_ATOMIC); 2046 spin_unlock(&mgr->lock); 2047 2048 if (hdl_id < 0) { 2049 dev_err(hdev->dev, "Failed to allocate IDR for a new signal reservation\n"); 2050 rc = -EINVAL; 2051 goto put_ctx; 2052 } 2053 2054 handle->id = hdl_id; 2055 handle->q_idx = q_idx; 2056 handle->hdev = hdev; 2057 kref_init(&handle->refcount); 2058 2059 hdev->asic_funcs->hw_queues_lock(hdev); 2060 2061 hw_sob = &prop->hw_sob[prop->curr_sob_offset]; 2062 2063 /* 2064 * Increment the SOB value by count by user request 2065 * to reserve those signals 2066 * check if the signals amount to reserve is not exceeding the max sob 2067 * value, if yes then switch sob. 2068 */ 2069 rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, count, 2070 true); 2071 if (rc) { 2072 dev_err(hdev->dev, "Failed to switch SOB\n"); 2073 hdev->asic_funcs->hw_queues_unlock(hdev); 2074 rc = -EINVAL; 2075 goto remove_idr; 2076 } 2077 /* set the hw_sob to the handle after calling the sob wraparound handler 2078 * since sob could have changed. 2079 */ 2080 handle->hw_sob = hw_sob; 2081 2082 /* store the current sob value for unreserve validity check, and 2083 * signal offset support 2084 */ 2085 handle->pre_sob_val = prop->next_sob_val - handle->count; 2086 2087 handle->cs_seq = ULLONG_MAX; 2088 2089 *signals_count = prop->next_sob_val; 2090 hdev->asic_funcs->hw_queues_unlock(hdev); 2091 2092 *sob_addr = handle->hw_sob->sob_addr; 2093 *handle_id = hdl_id; 2094 2095 dev_dbg(hdev->dev, 2096 "Signals reserved, sob_id: %d, sob addr: 0x%x, last sob_val: %u, q_idx: %d, hdl_id: %d\n", 2097 hw_sob->sob_id, handle->hw_sob->sob_addr, 2098 prop->next_sob_val - 1, q_idx, hdl_id); 2099 goto out; 2100 2101 remove_idr: 2102 spin_lock(&mgr->lock); 2103 idr_remove(&mgr->handles, hdl_id); 2104 spin_unlock(&mgr->lock); 2105 2106 put_ctx: 2107 hl_ctx_put(handle->ctx); 2108 kfree(handle); 2109 2110 out: 2111 return rc; 2112 } 2113 2114 static int cs_ioctl_unreserve_signals(struct hl_fpriv *hpriv, u32 handle_id) 2115 { 2116 struct hl_cs_encaps_sig_handle *encaps_sig_hdl; 2117 struct hl_sync_stream_properties *prop; 2118 struct hl_device *hdev = hpriv->hdev; 2119 struct hl_encaps_signals_mgr *mgr; 2120 struct hl_hw_sob *hw_sob; 2121 u32 q_idx, sob_addr; 2122 int rc = 0; 2123 2124 mgr = &hpriv->ctx->sig_mgr; 2125 2126 spin_lock(&mgr->lock); 2127 encaps_sig_hdl = idr_find(&mgr->handles, handle_id); 2128 if (encaps_sig_hdl) { 2129 dev_dbg(hdev->dev, "unreserve signals, handle: %u, SOB:0x%x, count: %u\n", 2130 handle_id, encaps_sig_hdl->hw_sob->sob_addr, 2131 encaps_sig_hdl->count); 2132 2133 hdev->asic_funcs->hw_queues_lock(hdev); 2134 2135 q_idx = encaps_sig_hdl->q_idx; 2136 prop = &hdev->kernel_queues[q_idx].sync_stream_prop; 2137 hw_sob = &prop->hw_sob[prop->curr_sob_offset]; 2138 sob_addr = hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id); 2139 2140 /* Check if sob_val got out of sync due to other 2141 * signal submission requests which were handled 2142 * between the reserve-unreserve calls or SOB switch 2143 * upon reaching SOB max value. 
2144 */ 2145 if (encaps_sig_hdl->pre_sob_val + encaps_sig_hdl->count 2146 != prop->next_sob_val || 2147 sob_addr != encaps_sig_hdl->hw_sob->sob_addr) { 2148 dev_err(hdev->dev, "Cannot unreserve signals, SOB val ran out of sync, expected: %u, actual val: %u\n", 2149 encaps_sig_hdl->pre_sob_val, 2150 (prop->next_sob_val - encaps_sig_hdl->count)); 2151 2152 hdev->asic_funcs->hw_queues_unlock(hdev); 2153 rc = -EINVAL; 2154 goto out_unlock; 2155 } 2156 2157 /* 2158 * Decrement the SOB value by count by user request 2159 * to unreserve those signals 2160 */ 2161 prop->next_sob_val -= encaps_sig_hdl->count; 2162 2163 hdev->asic_funcs->hw_queues_unlock(hdev); 2164 2165 hw_sob_put(hw_sob); 2166 2167 /* Release the id and free allocated memory of the handle */ 2168 idr_remove(&mgr->handles, handle_id); 2169 2170 /* unlock before calling ctx_put, where we might sleep */ 2171 spin_unlock(&mgr->lock); 2172 hl_ctx_put(encaps_sig_hdl->ctx); 2173 kfree(encaps_sig_hdl); 2174 goto out; 2175 } else { 2176 rc = -EINVAL; 2177 dev_err(hdev->dev, "failed to unreserve signals, cannot find handler\n"); 2178 } 2179 2180 out_unlock: 2181 spin_unlock(&mgr->lock); 2182 2183 out: 2184 return rc; 2185 } 2186 2187 static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, 2188 void __user *chunks, u32 num_chunks, 2189 u64 *cs_seq, u32 flags, u32 timeout, 2190 u32 *signal_sob_addr_offset, u16 *signal_initial_sob_count) 2191 { 2192 struct hl_cs_encaps_sig_handle *encaps_sig_hdl = NULL; 2193 bool handle_found = false, is_wait_cs = false, 2194 wait_cs_submitted = false, 2195 cs_encaps_signals = false; 2196 struct hl_cs_chunk *cs_chunk_array, *chunk; 2197 bool staged_cs_with_encaps_signals = false; 2198 struct hw_queue_properties *hw_queue_prop; 2199 struct hl_device *hdev = hpriv->hdev; 2200 struct hl_cs_compl *sig_waitcs_cmpl; 2201 u32 q_idx, collective_engine_id = 0; 2202 struct hl_cs_counters_atomic *cntr; 2203 struct hl_fence *sig_fence = NULL; 2204 struct hl_ctx *ctx = hpriv->ctx; 2205 enum hl_queue_type q_type; 2206 struct hl_cs *cs; 2207 u64 signal_seq; 2208 int rc; 2209 2210 cntr = &hdev->aggregated_cs_counters; 2211 *cs_seq = ULLONG_MAX; 2212 2213 rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks, 2214 ctx); 2215 if (rc) 2216 goto out; 2217 2218 /* currently it is guaranteed to have only one chunk */ 2219 chunk = &cs_chunk_array[0]; 2220 2221 if (chunk->queue_index >= hdev->asic_prop.max_queues) { 2222 atomic64_inc(&ctx->cs_counters.validation_drop_cnt); 2223 atomic64_inc(&cntr->validation_drop_cnt); 2224 dev_err(hdev->dev, "Queue index %d is invalid\n", 2225 chunk->queue_index); 2226 rc = -EINVAL; 2227 goto free_cs_chunk_array; 2228 } 2229 2230 q_idx = chunk->queue_index; 2231 hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx]; 2232 q_type = hw_queue_prop->type; 2233 2234 if (!hw_queue_prop->supports_sync_stream) { 2235 atomic64_inc(&ctx->cs_counters.validation_drop_cnt); 2236 atomic64_inc(&cntr->validation_drop_cnt); 2237 dev_err(hdev->dev, 2238 "Queue index %d does not support sync stream operations\n", 2239 q_idx); 2240 rc = -EINVAL; 2241 goto free_cs_chunk_array; 2242 } 2243 2244 if (cs_type == CS_TYPE_COLLECTIVE_WAIT) { 2245 if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) { 2246 atomic64_inc(&ctx->cs_counters.validation_drop_cnt); 2247 atomic64_inc(&cntr->validation_drop_cnt); 2248 dev_err(hdev->dev, 2249 "Queue index %d is invalid\n", q_idx); 2250 rc = -EINVAL; 2251 goto free_cs_chunk_array; 2252 } 2253 2254 if (!hdev->nic_ports_mask) { 2255 
atomic64_inc(&ctx->cs_counters.validation_drop_cnt); 2256 atomic64_inc(&cntr->validation_drop_cnt); 2257 dev_err(hdev->dev, 2258 "Collective operations not supported when NIC ports are disabled"); 2259 rc = -EINVAL; 2260 goto free_cs_chunk_array; 2261 } 2262 2263 collective_engine_id = chunk->collective_engine_id; 2264 } 2265 2266 is_wait_cs = !!(cs_type == CS_TYPE_WAIT || 2267 cs_type == CS_TYPE_COLLECTIVE_WAIT); 2268 2269 cs_encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS); 2270 2271 if (is_wait_cs) { 2272 rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq, 2273 ctx, cs_encaps_signals); 2274 if (rc) 2275 goto free_cs_chunk_array; 2276 2277 if (cs_encaps_signals) { 2278 /* check if cs sequence has encapsulated 2279 * signals handle 2280 */ 2281 struct idr *idp; 2282 u32 id; 2283 2284 spin_lock(&ctx->sig_mgr.lock); 2285 idp = &ctx->sig_mgr.handles; 2286 idr_for_each_entry(idp, encaps_sig_hdl, id) { 2287 if (encaps_sig_hdl->cs_seq == signal_seq) { 2288 /* get refcount to protect removing this handle from idr, 2289 * needed when multiple wait cs are used with offset 2290 * to wait on reserved encaps signals. 2291 * Since kref_put of this handle is executed outside the 2292 * current lock, it is possible that the handle refcount 2293 * is 0 but it yet to be removed from the list. In this 2294 * case need to consider the handle as not valid. 2295 */ 2296 if (kref_get_unless_zero(&encaps_sig_hdl->refcount)) 2297 handle_found = true; 2298 break; 2299 } 2300 } 2301 spin_unlock(&ctx->sig_mgr.lock); 2302 2303 if (!handle_found) { 2304 /* treat as signal CS already finished */ 2305 dev_dbg(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n", 2306 signal_seq); 2307 rc = 0; 2308 goto free_cs_chunk_array; 2309 } 2310 2311 /* validate also the signal offset value */ 2312 if (chunk->encaps_signal_offset > 2313 encaps_sig_hdl->count) { 2314 dev_err(hdev->dev, "offset(%u) value exceed max reserved signals count(%u)!\n", 2315 chunk->encaps_signal_offset, 2316 encaps_sig_hdl->count); 2317 rc = -EINVAL; 2318 goto free_cs_chunk_array; 2319 } 2320 } 2321 2322 sig_fence = hl_ctx_get_fence(ctx, signal_seq); 2323 if (IS_ERR(sig_fence)) { 2324 atomic64_inc(&ctx->cs_counters.validation_drop_cnt); 2325 atomic64_inc(&cntr->validation_drop_cnt); 2326 dev_err(hdev->dev, 2327 "Failed to get signal CS with seq 0x%llx\n", 2328 signal_seq); 2329 rc = PTR_ERR(sig_fence); 2330 goto free_cs_chunk_array; 2331 } 2332 2333 if (!sig_fence) { 2334 /* signal CS already finished */ 2335 rc = 0; 2336 goto free_cs_chunk_array; 2337 } 2338 2339 sig_waitcs_cmpl = 2340 container_of(sig_fence, struct hl_cs_compl, base_fence); 2341 2342 staged_cs_with_encaps_signals = !! 
2343 (sig_waitcs_cmpl->type == CS_TYPE_DEFAULT && 2344 (flags & HL_CS_FLAGS_ENCAP_SIGNALS)); 2345 2346 if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL && 2347 !staged_cs_with_encaps_signals) { 2348 atomic64_inc(&ctx->cs_counters.validation_drop_cnt); 2349 atomic64_inc(&cntr->validation_drop_cnt); 2350 dev_err(hdev->dev, 2351 "CS seq 0x%llx is not of a signal/encaps-signal CS\n", 2352 signal_seq); 2353 hl_fence_put(sig_fence); 2354 rc = -EINVAL; 2355 goto free_cs_chunk_array; 2356 } 2357 2358 if (completion_done(&sig_fence->completion)) { 2359 /* signal CS already finished */ 2360 hl_fence_put(sig_fence); 2361 rc = 0; 2362 goto free_cs_chunk_array; 2363 } 2364 } 2365 2366 rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout); 2367 if (rc) { 2368 if (is_wait_cs) 2369 hl_fence_put(sig_fence); 2370 2371 goto free_cs_chunk_array; 2372 } 2373 2374 /* 2375 * Save the signal CS fence for later initialization right before 2376 * hanging the wait CS on the queue. 2377 * for encaps signals case, we save the cs sequence and handle pointer 2378 * for later initialization. 2379 */ 2380 if (is_wait_cs) { 2381 cs->signal_fence = sig_fence; 2382 /* store the handle pointer, so we don't have to 2383 * look for it again, later on the flow 2384 * when we need to set SOB info in hw_queue. 2385 */ 2386 if (cs->encaps_signals) 2387 cs->encaps_sig_hdl = encaps_sig_hdl; 2388 } 2389 2390 hl_debugfs_add_cs(cs); 2391 2392 *cs_seq = cs->sequence; 2393 2394 if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL) 2395 rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type, 2396 q_idx, chunk->encaps_signal_offset); 2397 else if (cs_type == CS_TYPE_COLLECTIVE_WAIT) 2398 rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx, 2399 cs, q_idx, collective_engine_id, 2400 chunk->encaps_signal_offset); 2401 else { 2402 atomic64_inc(&ctx->cs_counters.validation_drop_cnt); 2403 atomic64_inc(&cntr->validation_drop_cnt); 2404 rc = -EINVAL; 2405 } 2406 2407 if (rc) 2408 goto free_cs_object; 2409 2410 if (q_type == QUEUE_TYPE_HW) 2411 INIT_WORK(&cs->finish_work, cs_completion); 2412 2413 rc = hl_hw_queue_schedule_cs(cs); 2414 if (rc) { 2415 /* In case wait cs failed here, it means the signal cs 2416 * already completed. we want to free all it's related objects 2417 * but we don't want to fail the ioctl. 
2418 */ 2419 if (is_wait_cs) 2420 rc = 0; 2421 else if (rc != -EAGAIN) 2422 dev_err(hdev->dev, 2423 "Failed to submit CS %d.%llu to H/W queues, error %d\n", 2424 ctx->asid, cs->sequence, rc); 2425 goto free_cs_object; 2426 } 2427 2428 *signal_sob_addr_offset = cs->sob_addr_offset; 2429 *signal_initial_sob_count = cs->initial_sob_count; 2430 2431 rc = HL_CS_STATUS_SUCCESS; 2432 if (is_wait_cs) 2433 wait_cs_submitted = true; 2434 goto put_cs; 2435 2436 free_cs_object: 2437 cs_rollback(hdev, cs); 2438 *cs_seq = ULLONG_MAX; 2439 /* The path below is both for good and erroneous exits */ 2440 put_cs: 2441 /* We finished with the CS in this function, so put the ref */ 2442 cs_put(cs); 2443 free_cs_chunk_array: 2444 if (!wait_cs_submitted && cs_encaps_signals && handle_found && is_wait_cs) 2445 kref_put(&encaps_sig_hdl->refcount, hl_encaps_release_handle_and_put_ctx); 2446 kfree(cs_chunk_array); 2447 out: 2448 return rc; 2449 } 2450 2451 static int cs_ioctl_engine_cores(struct hl_fpriv *hpriv, u64 engine_cores, 2452 u32 num_engine_cores, u32 core_command) 2453 { 2454 struct hl_device *hdev = hpriv->hdev; 2455 void __user *engine_cores_arr; 2456 u32 *cores; 2457 int rc; 2458 2459 if (!hdev->asic_prop.supports_engine_modes) 2460 return -EPERM; 2461 2462 if (!num_engine_cores || num_engine_cores > hdev->asic_prop.num_engine_cores) { 2463 dev_err(hdev->dev, "Number of engine cores %d is invalid\n", num_engine_cores); 2464 return -EINVAL; 2465 } 2466 2467 if (core_command != HL_ENGINE_CORE_RUN && core_command != HL_ENGINE_CORE_HALT) { 2468 dev_err(hdev->dev, "Engine core command is invalid\n"); 2469 return -EINVAL; 2470 } 2471 2472 engine_cores_arr = (void __user *) (uintptr_t) engine_cores; 2473 cores = kmalloc_array(num_engine_cores, sizeof(u32), GFP_KERNEL); 2474 if (!cores) 2475 return -ENOMEM; 2476 2477 if (copy_from_user(cores, engine_cores_arr, num_engine_cores * sizeof(u32))) { 2478 dev_err(hdev->dev, "Failed to copy core-ids array from user\n"); 2479 kfree(cores); 2480 return -EFAULT; 2481 } 2482 2483 rc = hdev->asic_funcs->set_engine_cores(hdev, cores, num_engine_cores, core_command); 2484 kfree(cores); 2485 2486 return rc; 2487 } 2488 2489 static int cs_ioctl_engines(struct hl_fpriv *hpriv, u64 engines_arr_user_addr, 2490 u32 num_engines, enum hl_engine_command command) 2491 { 2492 struct hl_device *hdev = hpriv->hdev; 2493 u32 *engines, max_num_of_engines; 2494 void __user *engines_arr; 2495 int rc; 2496 2497 if (!hdev->asic_prop.supports_engine_modes) 2498 return -EPERM; 2499 2500 if (command >= HL_ENGINE_COMMAND_MAX) { 2501 dev_err(hdev->dev, "Engine command is invalid\n"); 2502 return -EINVAL; 2503 } 2504 2505 max_num_of_engines = hdev->asic_prop.max_num_of_engines; 2506 if (command == HL_ENGINE_CORE_RUN || command == HL_ENGINE_CORE_HALT) 2507 max_num_of_engines = hdev->asic_prop.num_engine_cores; 2508 2509 if (!num_engines || num_engines > max_num_of_engines) { 2510 dev_err(hdev->dev, "Number of engines %d is invalid\n", num_engines); 2511 return -EINVAL; 2512 } 2513 2514 engines_arr = (void __user *) (uintptr_t) engines_arr_user_addr; 2515 engines = kmalloc_array(num_engines, sizeof(u32), GFP_KERNEL); 2516 if (!engines) 2517 return -ENOMEM; 2518 2519 if (copy_from_user(engines, engines_arr, num_engines * sizeof(u32))) { 2520 dev_err(hdev->dev, "Failed to copy engine-ids array from user\n"); 2521 kfree(engines); 2522 return -EFAULT; 2523 } 2524 2525 rc = hdev->asic_funcs->set_engines(hdev, engines, num_engines, command); 2526 kfree(engines); 2527 2528 return rc; 2529 } 2530 2531 static 
int cs_ioctl_flush_pci_hbw_writes(struct hl_fpriv *hpriv) 2532 { 2533 struct hl_device *hdev = hpriv->hdev; 2534 struct asic_fixed_properties *prop = &hdev->asic_prop; 2535 2536 if (!prop->hbw_flush_reg) { 2537 dev_dbg(hdev->dev, "HBW flush is not supported\n"); 2538 return -EOPNOTSUPP; 2539 } 2540 2541 RREG32(prop->hbw_flush_reg); 2542 2543 return 0; 2544 } 2545 2546 int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data) 2547 { 2548 union hl_cs_args *args = data; 2549 enum hl_cs_type cs_type = 0; 2550 u64 cs_seq = ULONG_MAX; 2551 void __user *chunks; 2552 u32 num_chunks, flags, timeout, 2553 signals_count = 0, sob_addr = 0, handle_id = 0; 2554 u16 sob_initial_count = 0; 2555 int rc; 2556 2557 rc = hl_cs_sanity_checks(hpriv, args); 2558 if (rc) 2559 goto out; 2560 2561 rc = hl_cs_ctx_switch(hpriv, args, &cs_seq); 2562 if (rc) 2563 goto out; 2564 2565 cs_type = hl_cs_get_cs_type(args->in.cs_flags & 2566 ~HL_CS_FLAGS_FORCE_RESTORE); 2567 chunks = (void __user *) (uintptr_t) args->in.chunks_execute; 2568 num_chunks = args->in.num_chunks_execute; 2569 flags = args->in.cs_flags; 2570 2571 /* In case this is a staged CS, user should supply the CS sequence */ 2572 if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) && 2573 !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST)) 2574 cs_seq = args->in.seq; 2575 2576 timeout = flags & HL_CS_FLAGS_CUSTOM_TIMEOUT 2577 ? msecs_to_jiffies(args->in.timeout * 1000) 2578 : hpriv->hdev->timeout_jiffies; 2579 2580 switch (cs_type) { 2581 case CS_TYPE_SIGNAL: 2582 case CS_TYPE_WAIT: 2583 case CS_TYPE_COLLECTIVE_WAIT: 2584 rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks, 2585 &cs_seq, args->in.cs_flags, timeout, 2586 &sob_addr, &sob_initial_count); 2587 break; 2588 case CS_RESERVE_SIGNALS: 2589 rc = cs_ioctl_reserve_signals(hpriv, 2590 args->in.encaps_signals_q_idx, 2591 args->in.encaps_signals_count, 2592 &handle_id, &sob_addr, &signals_count); 2593 break; 2594 case CS_UNRESERVE_SIGNALS: 2595 rc = cs_ioctl_unreserve_signals(hpriv, 2596 args->in.encaps_sig_handle_id); 2597 break; 2598 case CS_TYPE_ENGINE_CORE: 2599 rc = cs_ioctl_engine_cores(hpriv, args->in.engine_cores, 2600 args->in.num_engine_cores, args->in.core_command); 2601 break; 2602 case CS_TYPE_ENGINES: 2603 rc = cs_ioctl_engines(hpriv, args->in.engines, 2604 args->in.num_engines, args->in.engine_command); 2605 break; 2606 case CS_TYPE_FLUSH_PCI_HBW_WRITES: 2607 rc = cs_ioctl_flush_pci_hbw_writes(hpriv); 2608 break; 2609 default: 2610 rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq, 2611 args->in.cs_flags, 2612 args->in.encaps_sig_handle_id, 2613 timeout, &sob_initial_count); 2614 break; 2615 } 2616 out: 2617 if (rc != -EAGAIN) { 2618 memset(args, 0, sizeof(*args)); 2619 2620 switch (cs_type) { 2621 case CS_RESERVE_SIGNALS: 2622 args->out.handle_id = handle_id; 2623 args->out.sob_base_addr_offset = sob_addr; 2624 args->out.count = signals_count; 2625 break; 2626 case CS_TYPE_SIGNAL: 2627 args->out.sob_base_addr_offset = sob_addr; 2628 args->out.sob_count_before_submission = sob_initial_count; 2629 args->out.seq = cs_seq; 2630 break; 2631 case CS_TYPE_DEFAULT: 2632 args->out.sob_count_before_submission = sob_initial_count; 2633 args->out.seq = cs_seq; 2634 break; 2635 default: 2636 args->out.seq = cs_seq; 2637 break; 2638 } 2639 2640 args->out.status = rc; 2641 } 2642 2643 return rc; 2644 } 2645 2646 static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence, 2647 enum hl_cs_wait_status *status, u64 timeout_us, s64 *timestamp) 2648 { 2649 struct hl_device *hdev = ctx->hdev; 
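	/*
	 * Wait semantics, summarizing the code below: timeout_us == 0 only
	 * polls the fence via completion_done(), timeout_us == MAX_SCHEDULE_TIMEOUT
	 * blocks without an expiry, and any other value is converted to jiffies.
	 * A NULL fence means the fence is no longer kept for that seq (the CS
	 * completed long ago), so the outcome store is consulted and the status
	 * may be reported as CS_WAIT_STATUS_GONE. A minimal caller sketch,
	 * mirroring _hl_cs_wait_ioctl() further down:
	 *
	 *	fence = hl_ctx_get_fence(ctx, seq);
	 *	rc = hl_wait_for_fence(ctx, seq, fence, &status, 0, &timestamp);
	 *	hl_fence_put(fence);
	 */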
2650 ktime_t timestamp_kt; 2651 long completion_rc; 2652 int rc = 0, error; 2653 2654 if (IS_ERR(fence)) { 2655 rc = PTR_ERR(fence); 2656 if (rc == -EINVAL) 2657 dev_notice_ratelimited(hdev->dev, 2658 "Can't wait on CS %llu because current CS is at seq %llu\n", 2659 seq, ctx->cs_sequence); 2660 return rc; 2661 } 2662 2663 if (!fence) { 2664 if (!hl_pop_cs_outcome(&ctx->outcome_store, seq, ×tamp_kt, &error)) { 2665 dev_dbg(hdev->dev, 2666 "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n", 2667 seq, ctx->cs_sequence); 2668 *status = CS_WAIT_STATUS_GONE; 2669 return 0; 2670 } 2671 2672 completion_rc = 1; 2673 goto report_results; 2674 } 2675 2676 if (!timeout_us) { 2677 completion_rc = completion_done(&fence->completion); 2678 } else { 2679 unsigned long timeout; 2680 2681 timeout = (timeout_us == MAX_SCHEDULE_TIMEOUT) ? 2682 timeout_us : usecs_to_jiffies(timeout_us); 2683 completion_rc = 2684 wait_for_completion_interruptible_timeout( 2685 &fence->completion, timeout); 2686 } 2687 2688 error = fence->error; 2689 timestamp_kt = fence->timestamp; 2690 2691 report_results: 2692 if (completion_rc > 0) { 2693 *status = CS_WAIT_STATUS_COMPLETED; 2694 if (timestamp) 2695 *timestamp = ktime_to_ns(timestamp_kt); 2696 } else { 2697 *status = CS_WAIT_STATUS_BUSY; 2698 } 2699 2700 if (completion_rc == -ERESTARTSYS) 2701 rc = completion_rc; 2702 else if (error == -ETIMEDOUT || error == -EIO) 2703 rc = error; 2704 2705 return rc; 2706 } 2707 2708 /* 2709 * hl_cs_poll_fences - iterate CS fences to check for CS completion 2710 * 2711 * @mcs_data: multi-CS internal data 2712 * @mcs_compl: multi-CS completion structure 2713 * 2714 * @return 0 on success, otherwise non 0 error code 2715 * 2716 * The function iterates on all CS sequence in the list and set bit in 2717 * completion_bitmap for each completed CS. 2718 * While iterating, the function sets the stream map of each fence in the fence 2719 * array in the completion QID stream map to be used by CSs to perform 2720 * completion to the multi-CS context. 2721 * This function shall be called after taking context ref 2722 */ 2723 static int hl_cs_poll_fences(struct multi_cs_data *mcs_data, struct multi_cs_completion *mcs_compl) 2724 { 2725 struct hl_fence **fence_ptr = mcs_data->fence_arr; 2726 struct hl_device *hdev = mcs_data->ctx->hdev; 2727 int i, rc, arr_len = mcs_data->arr_len; 2728 u64 *seq_arr = mcs_data->seq_arr; 2729 ktime_t max_ktime, first_cs_time; 2730 enum hl_cs_wait_status status; 2731 2732 memset(fence_ptr, 0, arr_len * sizeof(struct hl_fence *)); 2733 2734 /* get all fences under the same lock */ 2735 rc = hl_ctx_get_fences(mcs_data->ctx, seq_arr, fence_ptr, arr_len); 2736 if (rc) 2737 return rc; 2738 2739 /* 2740 * re-initialize the completion here to handle 2 possible cases: 2741 * 1. CS will complete the multi-CS prior clearing the completion. in which 2742 * case the fence iteration is guaranteed to catch the CS completion. 2743 * 2. the completion will occur after re-init of the completion. 2744 * in which case we will wake up immediately in wait_for_completion. 
 */
	reinit_completion(&mcs_compl->completion);

	/*
	 * Set the timestamp to the maximum value up front so we can tell at
	 * the end whether it is valid: if this value is unchanged, no
	 * timestamp was updated.
	 */
	max_ktime = ktime_set(KTIME_SEC_MAX, 0);
	first_cs_time = max_ktime;

	for (i = 0; i < arr_len; i++, fence_ptr++) {
		struct hl_fence *fence = *fence_ptr;

		/*
		 * To prevent a case where we wait until timeout even though a CS associated
		 * with the multi-CS actually completed, we do things in the following order:
		 * 1. for each fence, set its QID map in the multi-CS completion QID map. This way
		 *    any CS can, potentially, complete the multi-CS for the specific QID (note
		 *    that once the completion is initialized, calling complete* and then wait on the
		 *    completion will cause it to return at once)
		 * 2. only after allowing multi-CS completion for the specific QID do we check whether
		 *    the specific CS already completed (and thus the wait-for-completion part will
		 *    be skipped). If the CS has not completed, it is guaranteed that the completing
		 *    CS will wake up the completion.
		 */
		if (fence)
			mcs_compl->stream_master_qid_map |= fence->stream_master_qid_map;

		/*
		 * The function won't sleep as it is called with timeout 0 (i.e.
		 * it only polls the fence)
		 */
		rc = hl_wait_for_fence(mcs_data->ctx, seq_arr[i], fence, &status, 0, NULL);
		if (rc) {
			dev_err(hdev->dev,
				"wait_for_fence error :%d for CS seq %llu\n",
				rc, seq_arr[i]);
			break;
		}

		switch (status) {
		case CS_WAIT_STATUS_BUSY:
			/* CS did not finish, the QID to wait on is already stored */
			break;
		case CS_WAIT_STATUS_COMPLETED:
			/*
			 * Use mcs_handling_done to avoid a case where mcs_data is
			 * returned to the user, indicating the CS completed, before
			 * the CS finished all of its mcs handling. This avoids a race
			 * the next time the user waits for mcs.
			 * Note: when reaching this case, fence is definitely not NULL,
			 * but the NULL check was added to satisfy static analysis.
			 */
			if (fence && !fence->mcs_handling_done) {
				/*
				 * In case the multi-CS is completed but the MCS handling is
				 * not done, we "complete" the multi-CS to prevent it from
				 * waiting until timeout; the "multi-CS handling done" check
				 * will get another chance at the next iteration.
				 */
				complete_all(&mcs_compl->completion);
				break;
			}

			mcs_data->completion_bitmap |= BIT(i);
			/*
			 * For all completed CSs we take the earliest timestamp.
			 * For this we have to validate that the timestamp is the
			 * earliest of all the timestamps seen so far.
			 */
			if (fence && mcs_data->update_ts &&
					(ktime_compare(fence->timestamp, first_cs_time) < 0))
				first_cs_time = fence->timestamp;
			break;
		case CS_WAIT_STATUS_GONE:
			mcs_data->update_ts = false;
			mcs_data->gone_cs = true;
			/*
			 * It is possible to get old sequence numbers from the user
			 * which relate to already-completed CSs whose fences are
			 * already gone. In this case the CS is marked as completed, but
			 * there is no need to consider its QID for the mcs completion.
2827 */ 2828 mcs_data->completion_bitmap |= BIT(i); 2829 break; 2830 default: 2831 dev_err(hdev->dev, "Invalid fence status\n"); 2832 rc = -EINVAL; 2833 break; 2834 } 2835 2836 } 2837 2838 hl_fences_put(mcs_data->fence_arr, arr_len); 2839 2840 if (mcs_data->update_ts && 2841 (ktime_compare(first_cs_time, max_ktime) != 0)) 2842 mcs_data->timestamp = ktime_to_ns(first_cs_time); 2843 2844 return rc; 2845 } 2846 2847 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq, 2848 enum hl_cs_wait_status *status, s64 *timestamp) 2849 { 2850 struct hl_fence *fence; 2851 int rc = 0; 2852 2853 if (timestamp) 2854 *timestamp = 0; 2855 2856 hl_ctx_get(ctx); 2857 2858 fence = hl_ctx_get_fence(ctx, seq); 2859 2860 rc = hl_wait_for_fence(ctx, seq, fence, status, timeout_us, timestamp); 2861 hl_fence_put(fence); 2862 hl_ctx_put(ctx); 2863 2864 return rc; 2865 } 2866 2867 static inline unsigned long hl_usecs64_to_jiffies(const u64 usecs) 2868 { 2869 if (usecs <= U32_MAX) 2870 return usecs_to_jiffies(usecs); 2871 2872 /* 2873 * If the value in nanoseconds is larger than 64 bit, use the largest 2874 * 64 bit value. 2875 */ 2876 if (usecs >= ((u64)(U64_MAX / NSEC_PER_USEC))) 2877 return nsecs_to_jiffies(U64_MAX); 2878 2879 return nsecs_to_jiffies(usecs * NSEC_PER_USEC); 2880 } 2881 2882 /* 2883 * hl_wait_multi_cs_completion_init - init completion structure 2884 * 2885 * @hdev: pointer to habanalabs device structure 2886 * @stream_master_bitmap: stream master QIDs map, set bit indicates stream 2887 * master QID to wait on 2888 * 2889 * @return valid completion struct pointer on success, otherwise error pointer 2890 * 2891 * up to MULTI_CS_MAX_USER_CTX calls can be done concurrently to the driver. 2892 * the function gets the first available completion (by marking it "used") 2893 * and initialize its values. 2894 */ 2895 static struct multi_cs_completion *hl_wait_multi_cs_completion_init(struct hl_device *hdev) 2896 { 2897 struct multi_cs_completion *mcs_compl; 2898 int i; 2899 2900 /* find free multi_cs completion structure */ 2901 for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) { 2902 mcs_compl = &hdev->multi_cs_completion[i]; 2903 spin_lock(&mcs_compl->lock); 2904 if (!mcs_compl->used) { 2905 mcs_compl->used = 1; 2906 mcs_compl->timestamp = 0; 2907 /* 2908 * init QID map to 0 to avoid completion by CSs. 
the actual QID map 2909 * to multi-CS CSs will be set incrementally at a later stage 2910 */ 2911 mcs_compl->stream_master_qid_map = 0; 2912 spin_unlock(&mcs_compl->lock); 2913 break; 2914 } 2915 spin_unlock(&mcs_compl->lock); 2916 } 2917 2918 if (i == MULTI_CS_MAX_USER_CTX) { 2919 dev_err(hdev->dev, "no available multi-CS completion structure\n"); 2920 return ERR_PTR(-ENOMEM); 2921 } 2922 return mcs_compl; 2923 } 2924 2925 /* 2926 * hl_wait_multi_cs_completion_fini - return completion structure and set as 2927 * unused 2928 * 2929 * @mcs_compl: pointer to the completion structure 2930 */ 2931 static void hl_wait_multi_cs_completion_fini( 2932 struct multi_cs_completion *mcs_compl) 2933 { 2934 /* 2935 * free completion structure, do it under lock to be in-sync with the 2936 * thread that signals completion 2937 */ 2938 spin_lock(&mcs_compl->lock); 2939 mcs_compl->used = 0; 2940 spin_unlock(&mcs_compl->lock); 2941 } 2942 2943 /* 2944 * hl_wait_multi_cs_completion - wait for first CS to complete 2945 * 2946 * @mcs_data: multi-CS internal data 2947 * 2948 * @return 0 on success, otherwise non 0 error code 2949 */ 2950 static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data, 2951 struct multi_cs_completion *mcs_compl) 2952 { 2953 long completion_rc; 2954 2955 completion_rc = wait_for_completion_interruptible_timeout(&mcs_compl->completion, 2956 mcs_data->timeout_jiffies); 2957 2958 /* update timestamp */ 2959 if (completion_rc > 0) 2960 mcs_data->timestamp = mcs_compl->timestamp; 2961 2962 if (completion_rc == -ERESTARTSYS) 2963 return completion_rc; 2964 2965 mcs_data->wait_status = completion_rc; 2966 2967 return 0; 2968 } 2969 2970 /* 2971 * hl_multi_cs_completion_init - init array of multi-CS completion structures 2972 * 2973 * @hdev: pointer to habanalabs device structure 2974 */ 2975 void hl_multi_cs_completion_init(struct hl_device *hdev) 2976 { 2977 struct multi_cs_completion *mcs_cmpl; 2978 int i; 2979 2980 for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) { 2981 mcs_cmpl = &hdev->multi_cs_completion[i]; 2982 mcs_cmpl->used = 0; 2983 spin_lock_init(&mcs_cmpl->lock); 2984 init_completion(&mcs_cmpl->completion); 2985 } 2986 } 2987 2988 /* 2989 * hl_multi_cs_wait_ioctl - implementation of the multi-CS wait ioctl 2990 * 2991 * @hpriv: pointer to the private data of the fd 2992 * @data: pointer to multi-CS wait ioctl in/out args 2993 * 2994 */ 2995 static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) 2996 { 2997 struct multi_cs_completion *mcs_compl; 2998 struct hl_device *hdev = hpriv->hdev; 2999 struct multi_cs_data mcs_data = {}; 3000 union hl_wait_cs_args *args = data; 3001 struct hl_ctx *ctx = hpriv->ctx; 3002 struct hl_fence **fence_arr; 3003 void __user *seq_arr; 3004 u32 size_to_copy; 3005 u64 *cs_seq_arr; 3006 u8 seq_arr_len; 3007 int rc, i; 3008 3009 for (i = 0 ; i < sizeof(args->in.pad) ; i++) 3010 if (args->in.pad[i]) { 3011 dev_dbg(hdev->dev, "Padding bytes must be 0\n"); 3012 return -EINVAL; 3013 } 3014 3015 if (!hdev->supports_wait_for_multi_cs) { 3016 dev_err(hdev->dev, "Wait for multi CS is not supported\n"); 3017 return -EPERM; 3018 } 3019 3020 seq_arr_len = args->in.seq_arr_len; 3021 3022 if (seq_arr_len > HL_WAIT_MULTI_CS_LIST_MAX_LEN) { 3023 dev_err(hdev->dev, "Can wait only up to %d CSs, input sequence is of length %u\n", 3024 HL_WAIT_MULTI_CS_LIST_MAX_LEN, seq_arr_len); 3025 return -EINVAL; 3026 } 3027 3028 /* allocate memory for sequence array */ 3029 cs_seq_arr = 3030 kmalloc_array(seq_arr_len, sizeof(*cs_seq_arr), GFP_KERNEL); 3031 if 
(!cs_seq_arr) 3032 return -ENOMEM; 3033 3034 /* copy CS sequence array from user */ 3035 seq_arr = (void __user *) (uintptr_t) args->in.seq; 3036 size_to_copy = seq_arr_len * sizeof(*cs_seq_arr); 3037 if (copy_from_user(cs_seq_arr, seq_arr, size_to_copy)) { 3038 dev_err(hdev->dev, "Failed to copy multi-cs sequence array from user\n"); 3039 rc = -EFAULT; 3040 goto free_seq_arr; 3041 } 3042 3043 /* allocate array for the fences */ 3044 fence_arr = kmalloc_array(seq_arr_len, sizeof(struct hl_fence *), GFP_KERNEL); 3045 if (!fence_arr) { 3046 rc = -ENOMEM; 3047 goto free_seq_arr; 3048 } 3049 3050 /* initialize the multi-CS internal data */ 3051 mcs_data.ctx = ctx; 3052 mcs_data.seq_arr = cs_seq_arr; 3053 mcs_data.fence_arr = fence_arr; 3054 mcs_data.arr_len = seq_arr_len; 3055 3056 hl_ctx_get(ctx); 3057 3058 /* wait (with timeout) for the first CS to be completed */ 3059 mcs_data.timeout_jiffies = hl_usecs64_to_jiffies(args->in.timeout_us); 3060 mcs_compl = hl_wait_multi_cs_completion_init(hdev); 3061 if (IS_ERR(mcs_compl)) { 3062 rc = PTR_ERR(mcs_compl); 3063 goto put_ctx; 3064 } 3065 3066 /* poll all CS fences, extract timestamp */ 3067 mcs_data.update_ts = true; 3068 rc = hl_cs_poll_fences(&mcs_data, mcs_compl); 3069 /* 3070 * skip wait for CS completion when one of the below is true: 3071 * - an error on the poll function 3072 * - one or more CS in the list completed 3073 * - the user called ioctl with timeout 0 3074 */ 3075 if (rc || mcs_data.completion_bitmap || !args->in.timeout_us) 3076 goto completion_fini; 3077 3078 while (true) { 3079 rc = hl_wait_multi_cs_completion(&mcs_data, mcs_compl); 3080 if (rc || (mcs_data.wait_status == 0)) 3081 break; 3082 3083 /* 3084 * poll fences once again to update the CS map. 3085 * no timestamp should be updated this time. 3086 */ 3087 mcs_data.update_ts = false; 3088 rc = hl_cs_poll_fences(&mcs_data, mcs_compl); 3089 3090 if (rc || mcs_data.completion_bitmap) 3091 break; 3092 3093 /* 3094 * if hl_wait_multi_cs_completion returned before timeout (i.e. 
3095 * it got a completion) it either got completed by CS in the multi CS list 3096 * (in which case the indication will be non empty completion_bitmap) or it 3097 * got completed by CS submitted to one of the shared stream master but 3098 * not in the multi CS list (in which case we should wait again but modify 3099 * the timeout and set timestamp as zero to let a CS related to the current 3100 * multi-CS set a new, relevant, timestamp) 3101 */ 3102 mcs_data.timeout_jiffies = mcs_data.wait_status; 3103 mcs_compl->timestamp = 0; 3104 } 3105 3106 completion_fini: 3107 hl_wait_multi_cs_completion_fini(mcs_compl); 3108 3109 put_ctx: 3110 hl_ctx_put(ctx); 3111 kfree(fence_arr); 3112 3113 free_seq_arr: 3114 kfree(cs_seq_arr); 3115 3116 if (rc == -ERESTARTSYS) { 3117 dev_err_ratelimited(hdev->dev, 3118 "user process got signal while waiting for Multi-CS\n"); 3119 rc = -EINTR; 3120 } 3121 3122 if (rc) 3123 return rc; 3124 3125 /* update output args */ 3126 memset(args, 0, sizeof(*args)); 3127 3128 if (mcs_data.completion_bitmap) { 3129 args->out.status = HL_WAIT_CS_STATUS_COMPLETED; 3130 args->out.cs_completion_map = mcs_data.completion_bitmap; 3131 3132 /* if timestamp not 0- it's valid */ 3133 if (mcs_data.timestamp) { 3134 args->out.timestamp_nsec = mcs_data.timestamp; 3135 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD; 3136 } 3137 3138 /* update if some CS was gone */ 3139 if (!mcs_data.timestamp) 3140 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE; 3141 } else { 3142 args->out.status = HL_WAIT_CS_STATUS_BUSY; 3143 } 3144 3145 return 0; 3146 } 3147 3148 static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) 3149 { 3150 struct hl_device *hdev = hpriv->hdev; 3151 union hl_wait_cs_args *args = data; 3152 enum hl_cs_wait_status status; 3153 u64 seq = args->in.seq; 3154 s64 timestamp; 3155 int rc; 3156 3157 rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq, &status, ×tamp); 3158 3159 if (rc == -ERESTARTSYS) { 3160 dev_err_ratelimited(hdev->dev, 3161 "user process got signal while waiting for CS handle %llu\n", 3162 seq); 3163 return -EINTR; 3164 } 3165 3166 memset(args, 0, sizeof(*args)); 3167 3168 if (rc) { 3169 if (rc == -ETIMEDOUT) { 3170 dev_err_ratelimited(hdev->dev, 3171 "CS %llu has timed-out while user process is waiting for it\n", 3172 seq); 3173 args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT; 3174 } else if (rc == -EIO) { 3175 dev_err_ratelimited(hdev->dev, 3176 "CS %llu has been aborted while user process is waiting for it\n", 3177 seq); 3178 args->out.status = HL_WAIT_CS_STATUS_ABORTED; 3179 } 3180 return rc; 3181 } 3182 3183 if (timestamp) { 3184 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD; 3185 args->out.timestamp_nsec = timestamp; 3186 } 3187 3188 switch (status) { 3189 case CS_WAIT_STATUS_GONE: 3190 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE; 3191 fallthrough; 3192 case CS_WAIT_STATUS_COMPLETED: 3193 args->out.status = HL_WAIT_CS_STATUS_COMPLETED; 3194 break; 3195 case CS_WAIT_STATUS_BUSY: 3196 default: 3197 args->out.status = HL_WAIT_CS_STATUS_BUSY; 3198 break; 3199 } 3200 3201 return 0; 3202 } 3203 3204 static int ts_buff_get_kernel_ts_record(struct hl_mmap_mem_buf *buf, 3205 struct hl_cb *cq_cb, 3206 u64 ts_offset, u64 cq_offset, u64 target_value, 3207 spinlock_t *wait_list_lock, 3208 struct hl_user_pending_interrupt **pend) 3209 { 3210 struct hl_ts_buff *ts_buff = buf->private; 3211 struct hl_user_pending_interrupt *requested_offset_record = 3212 (struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address + 3213 ts_offset; 
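	/*
	 * Offset check example (illustrative sizes): cb_last points one record
	 * past the end of the kernel TS buffer, so with a 4 KiB buffer and a
	 * record size of 64 bytes (a made-up value for the size of struct
	 * hl_user_pending_interrupt) the valid ts_offset range is 0..63; any
	 * larger offset is rejected below with -EINVAL.
	 */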
3214 struct hl_user_pending_interrupt *cb_last = 3215 (struct hl_user_pending_interrupt *)ts_buff->kernel_buff_address + 3216 (ts_buff->kernel_buff_size / sizeof(struct hl_user_pending_interrupt)); 3217 unsigned long iter_counter = 0; 3218 u64 current_cq_counter; 3219 ktime_t timestamp; 3220 3221 /* Validate ts_offset not exceeding last max */ 3222 if (requested_offset_record >= cb_last) { 3223 dev_err(buf->mmg->dev, "Ts offset exceeds max CB offset(0x%llx)\n", 3224 (u64)(uintptr_t)cb_last); 3225 return -EINVAL; 3226 } 3227 3228 timestamp = ktime_get(); 3229 3230 start_over: 3231 spin_lock(wait_list_lock); 3232 3233 /* Unregister only if we didn't reach the target value 3234 * since in this case there will be no handling in irq context 3235 * and then it's safe to delete the node out of the interrupt list 3236 * then re-use it on other interrupt 3237 */ 3238 if (requested_offset_record->ts_reg_info.in_use) { 3239 current_cq_counter = *requested_offset_record->cq_kernel_addr; 3240 if (current_cq_counter < requested_offset_record->cq_target_value) { 3241 list_del(&requested_offset_record->wait_list_node); 3242 spin_unlock(wait_list_lock); 3243 3244 hl_mmap_mem_buf_put(requested_offset_record->ts_reg_info.buf); 3245 hl_cb_put(requested_offset_record->ts_reg_info.cq_cb); 3246 3247 dev_dbg(buf->mmg->dev, 3248 "ts node removed from interrupt list now can re-use\n"); 3249 } else { 3250 dev_dbg(buf->mmg->dev, 3251 "ts node in middle of irq handling\n"); 3252 3253 /* irq thread handling in the middle give it time to finish */ 3254 spin_unlock(wait_list_lock); 3255 usleep_range(100, 1000); 3256 if (++iter_counter == MAX_TS_ITER_NUM) { 3257 dev_err(buf->mmg->dev, 3258 "Timestamp offset processing reached timeout of %lld ms\n", 3259 ktime_ms_delta(ktime_get(), timestamp)); 3260 return -EAGAIN; 3261 } 3262 3263 goto start_over; 3264 } 3265 } else { 3266 /* Fill up the new registration node info */ 3267 requested_offset_record->ts_reg_info.buf = buf; 3268 requested_offset_record->ts_reg_info.cq_cb = cq_cb; 3269 requested_offset_record->ts_reg_info.timestamp_kernel_addr = 3270 (u64 *) ts_buff->user_buff_address + ts_offset; 3271 requested_offset_record->cq_kernel_addr = 3272 (u64 *) cq_cb->kernel_address + cq_offset; 3273 requested_offset_record->cq_target_value = target_value; 3274 3275 spin_unlock(wait_list_lock); 3276 } 3277 3278 *pend = requested_offset_record; 3279 3280 dev_dbg(buf->mmg->dev, "Found available node in TS kernel CB %p\n", 3281 requested_offset_record); 3282 return 0; 3283 } 3284 3285 static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, 3286 struct hl_mem_mgr *cb_mmg, struct hl_mem_mgr *mmg, 3287 u64 timeout_us, u64 cq_counters_handle, u64 cq_counters_offset, 3288 u64 target_value, struct hl_user_interrupt *interrupt, 3289 bool register_ts_record, u64 ts_handle, u64 ts_offset, 3290 u32 *status, u64 *timestamp) 3291 { 3292 struct hl_user_pending_interrupt *pend; 3293 struct hl_mmap_mem_buf *buf; 3294 struct hl_cb *cq_cb; 3295 unsigned long timeout; 3296 long completion_rc; 3297 int rc = 0; 3298 3299 timeout = hl_usecs64_to_jiffies(timeout_us); 3300 3301 hl_ctx_get(ctx); 3302 3303 cq_cb = hl_cb_get(cb_mmg, cq_counters_handle); 3304 if (!cq_cb) { 3305 rc = -EINVAL; 3306 goto put_ctx; 3307 } 3308 3309 /* Validate the cq offset */ 3310 if (((u64 *) cq_cb->kernel_address + cq_counters_offset) >= 3311 ((u64 *) cq_cb->kernel_address + (cq_cb->size / sizeof(u64)))) { 3312 rc = -EINVAL; 3313 goto put_cq_cb; 3314 } 3315 3316 if (register_ts_record) { 3317 
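		/*
		 * Registration mode in a nutshell: rather than blocking, the
		 * caller supplies a timestamp buffer handle and offset. The node
		 * is armed on the interrupt's wait list and the timestamp (in
		 * ns) is written to the user's timestamp buffer once the CQ
		 * counter reaches target_value, either directly below if the
		 * target was already reached, or later from the interrupt
		 * handling path. The ioctl itself returns right away with a
		 * completed status instead of waiting.
		 */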
dev_dbg(hdev->dev, "Timestamp registration: interrupt id: %u, ts offset: %llu, cq_offset: %llu\n", 3318 interrupt->interrupt_id, ts_offset, cq_counters_offset); 3319 buf = hl_mmap_mem_buf_get(mmg, ts_handle); 3320 if (!buf) { 3321 rc = -EINVAL; 3322 goto put_cq_cb; 3323 } 3324 3325 /* get ts buffer record */ 3326 rc = ts_buff_get_kernel_ts_record(buf, cq_cb, ts_offset, 3327 cq_counters_offset, target_value, 3328 &interrupt->wait_list_lock, &pend); 3329 if (rc) 3330 goto put_ts_buff; 3331 } else { 3332 pend = kzalloc(sizeof(*pend), GFP_KERNEL); 3333 if (!pend) { 3334 rc = -ENOMEM; 3335 goto put_cq_cb; 3336 } 3337 hl_fence_init(&pend->fence, ULONG_MAX); 3338 pend->cq_kernel_addr = (u64 *) cq_cb->kernel_address + cq_counters_offset; 3339 pend->cq_target_value = target_value; 3340 } 3341 3342 spin_lock(&interrupt->wait_list_lock); 3343 3344 /* We check for completion value as interrupt could have been received 3345 * before we added the node to the wait list 3346 */ 3347 if (*pend->cq_kernel_addr >= target_value) { 3348 if (register_ts_record) 3349 pend->ts_reg_info.in_use = 0; 3350 spin_unlock(&interrupt->wait_list_lock); 3351 3352 *status = HL_WAIT_CS_STATUS_COMPLETED; 3353 3354 if (register_ts_record) { 3355 *pend->ts_reg_info.timestamp_kernel_addr = ktime_get_ns(); 3356 goto put_ts_buff; 3357 } else { 3358 pend->fence.timestamp = ktime_get(); 3359 goto set_timestamp; 3360 } 3361 } else if (!timeout_us) { 3362 spin_unlock(&interrupt->wait_list_lock); 3363 *status = HL_WAIT_CS_STATUS_BUSY; 3364 pend->fence.timestamp = ktime_get(); 3365 goto set_timestamp; 3366 } 3367 3368 /* Add pending user interrupt to relevant list for the interrupt 3369 * handler to monitor. 3370 * Note that we cannot have sorted list by target value, 3371 * in order to shorten the list pass loop, since 3372 * same list could have nodes for different cq counter handle. 3373 * Note: 3374 * Mark ts buff offset as in use here in the spinlock protection area 3375 * to avoid getting in the re-use section in ts_buff_get_kernel_ts_record 3376 * before adding the node to the list. this scenario might happen when 3377 * multiple threads are racing on same offset and one thread could 3378 * set the ts buff in ts_buff_get_kernel_ts_record then the other thread 3379 * takes over and get to ts_buff_get_kernel_ts_record and then we will try 3380 * to re-use the same ts buff offset, and will try to delete a non existing 3381 * node from the list. 
3382 */ 3383 if (register_ts_record) 3384 pend->ts_reg_info.in_use = 1; 3385 3386 list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head); 3387 spin_unlock(&interrupt->wait_list_lock); 3388 3389 if (register_ts_record) { 3390 rc = *status = HL_WAIT_CS_STATUS_COMPLETED; 3391 goto ts_registration_exit; 3392 } 3393 3394 /* Wait for interrupt handler to signal completion */ 3395 completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion, 3396 timeout); 3397 if (completion_rc > 0) { 3398 *status = HL_WAIT_CS_STATUS_COMPLETED; 3399 } else { 3400 if (completion_rc == -ERESTARTSYS) { 3401 dev_err_ratelimited(hdev->dev, 3402 "user process got signal while waiting for interrupt ID %d\n", 3403 interrupt->interrupt_id); 3404 rc = -EINTR; 3405 *status = HL_WAIT_CS_STATUS_ABORTED; 3406 } else { 3407 if (pend->fence.error == -EIO) { 3408 dev_err_ratelimited(hdev->dev, 3409 "interrupt based wait ioctl aborted(error:%d) due to a reset cycle initiated\n", 3410 pend->fence.error); 3411 rc = -EIO; 3412 *status = HL_WAIT_CS_STATUS_ABORTED; 3413 } else { 3414 /* The wait has timed-out. We don't know anything beyond that 3415 * because the workload wasn't submitted through the driver. 3416 * Therefore, from driver's perspective, the workload is still 3417 * executing. 3418 */ 3419 rc = 0; 3420 *status = HL_WAIT_CS_STATUS_BUSY; 3421 } 3422 } 3423 } 3424 3425 /* 3426 * We keep removing the node from list here, and not at the irq handler 3427 * for completion timeout case. and if it's a registration 3428 * for ts record, the node will be deleted in the irq handler after 3429 * we reach the target value. 3430 */ 3431 spin_lock(&interrupt->wait_list_lock); 3432 list_del(&pend->wait_list_node); 3433 spin_unlock(&interrupt->wait_list_lock); 3434 3435 set_timestamp: 3436 *timestamp = ktime_to_ns(pend->fence.timestamp); 3437 kfree(pend); 3438 hl_cb_put(cq_cb); 3439 ts_registration_exit: 3440 hl_ctx_put(ctx); 3441 3442 return rc; 3443 3444 put_ts_buff: 3445 hl_mmap_mem_buf_put(buf); 3446 put_cq_cb: 3447 hl_cb_put(cq_cb); 3448 put_ctx: 3449 hl_ctx_put(ctx); 3450 3451 return rc; 3452 } 3453 3454 static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_ctx *ctx, 3455 u64 timeout_us, u64 user_address, 3456 u64 target_value, struct hl_user_interrupt *interrupt, 3457 u32 *status, 3458 u64 *timestamp) 3459 { 3460 struct hl_user_pending_interrupt *pend; 3461 unsigned long timeout; 3462 u64 completion_value; 3463 long completion_rc; 3464 int rc = 0; 3465 3466 timeout = hl_usecs64_to_jiffies(timeout_us); 3467 3468 hl_ctx_get(ctx); 3469 3470 pend = kzalloc(sizeof(*pend), GFP_KERNEL); 3471 if (!pend) { 3472 hl_ctx_put(ctx); 3473 return -ENOMEM; 3474 } 3475 3476 hl_fence_init(&pend->fence, ULONG_MAX); 3477 3478 /* Add pending user interrupt to relevant list for the interrupt 3479 * handler to monitor 3480 */ 3481 spin_lock(&interrupt->wait_list_lock); 3482 list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head); 3483 spin_unlock(&interrupt->wait_list_lock); 3484 3485 /* We check for completion value as interrupt could have been received 3486 * before we added the node to the wait list 3487 */ 3488 if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) { 3489 dev_err(hdev->dev, "Failed to copy completion value from user\n"); 3490 rc = -EFAULT; 3491 goto remove_pending_user_interrupt; 3492 } 3493 3494 if (completion_value >= target_value) { 3495 *status = HL_WAIT_CS_STATUS_COMPLETED; 3496 /* There was no interrupt, we assume the completion is now. 
*/ 3497 pend->fence.timestamp = ktime_get(); 3498 } else { 3499 *status = HL_WAIT_CS_STATUS_BUSY; 3500 } 3501 3502 if (!timeout_us || (*status == HL_WAIT_CS_STATUS_COMPLETED)) 3503 goto remove_pending_user_interrupt; 3504 3505 wait_again: 3506 /* Wait for interrupt handler to signal completion */ 3507 completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion, 3508 timeout); 3509 3510 /* If timeout did not expire we need to perform the comparison. 3511 * If comparison fails, keep waiting until timeout expires 3512 */ 3513 if (completion_rc > 0) { 3514 spin_lock(&interrupt->wait_list_lock); 3515 /* reinit_completion must be called before we check for user 3516 * completion value, otherwise, if interrupt is received after 3517 * the comparison and before the next wait_for_completion, 3518 * we will reach timeout and fail 3519 */ 3520 reinit_completion(&pend->fence.completion); 3521 spin_unlock(&interrupt->wait_list_lock); 3522 3523 if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) { 3524 dev_err(hdev->dev, "Failed to copy completion value from user\n"); 3525 rc = -EFAULT; 3526 3527 goto remove_pending_user_interrupt; 3528 } 3529 3530 if (completion_value >= target_value) { 3531 *status = HL_WAIT_CS_STATUS_COMPLETED; 3532 } else if (pend->fence.error) { 3533 dev_err_ratelimited(hdev->dev, 3534 "interrupt based wait ioctl aborted(error:%d) due to a reset cycle initiated\n", 3535 pend->fence.error); 3536 /* set the command completion status as ABORTED */ 3537 *status = HL_WAIT_CS_STATUS_ABORTED; 3538 } else { 3539 timeout = completion_rc; 3540 goto wait_again; 3541 } 3542 } else if (completion_rc == -ERESTARTSYS) { 3543 dev_err_ratelimited(hdev->dev, 3544 "user process got signal while waiting for interrupt ID %d\n", 3545 interrupt->interrupt_id); 3546 rc = -EINTR; 3547 } else { 3548 /* The wait has timed-out. We don't know anything beyond that 3549 * because the workload wasn't submitted through the driver. 3550 * Therefore, from driver's perspective, the workload is still 3551 * executing. 
3552 */ 3553 rc = 0; 3554 *status = HL_WAIT_CS_STATUS_BUSY; 3555 } 3556 3557 remove_pending_user_interrupt: 3558 spin_lock(&interrupt->wait_list_lock); 3559 list_del(&pend->wait_list_node); 3560 spin_unlock(&interrupt->wait_list_lock); 3561 3562 *timestamp = ktime_to_ns(pend->fence.timestamp); 3563 3564 kfree(pend); 3565 hl_ctx_put(ctx); 3566 3567 return rc; 3568 } 3569 3570 static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data) 3571 { 3572 u16 interrupt_id, first_interrupt, last_interrupt; 3573 struct hl_device *hdev = hpriv->hdev; 3574 struct asic_fixed_properties *prop; 3575 struct hl_user_interrupt *interrupt; 3576 union hl_wait_cs_args *args = data; 3577 u32 status = HL_WAIT_CS_STATUS_BUSY; 3578 u64 timestamp = 0; 3579 int rc, int_idx; 3580 3581 prop = &hdev->asic_prop; 3582 3583 if (!(prop->user_interrupt_count + prop->user_dec_intr_count)) { 3584 dev_err(hdev->dev, "no user interrupts allowed"); 3585 return -EPERM; 3586 } 3587 3588 interrupt_id = FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags); 3589 3590 first_interrupt = prop->first_available_user_interrupt; 3591 last_interrupt = prop->first_available_user_interrupt + prop->user_interrupt_count - 1; 3592 3593 if (interrupt_id < prop->user_dec_intr_count) { 3594 3595 /* Check if the requested core is enabled */ 3596 if (!(prop->decoder_enabled_mask & BIT(interrupt_id))) { 3597 dev_err(hdev->dev, "interrupt on a disabled core(%u) not allowed", 3598 interrupt_id); 3599 return -EINVAL; 3600 } 3601 3602 interrupt = &hdev->user_interrupt[interrupt_id]; 3603 3604 } else if (interrupt_id >= first_interrupt && interrupt_id <= last_interrupt) { 3605 3606 int_idx = interrupt_id - first_interrupt + prop->user_dec_intr_count; 3607 interrupt = &hdev->user_interrupt[int_idx]; 3608 3609 } else if (interrupt_id == HL_COMMON_USER_CQ_INTERRUPT_ID) { 3610 interrupt = &hdev->common_user_cq_interrupt; 3611 } else if (interrupt_id == HL_COMMON_DEC_INTERRUPT_ID) { 3612 interrupt = &hdev->common_decoder_interrupt; 3613 } else { 3614 dev_err(hdev->dev, "invalid user interrupt %u", interrupt_id); 3615 return -EINVAL; 3616 } 3617 3618 if (args->in.flags & HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ) 3619 rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &hpriv->mem_mgr, &hpriv->mem_mgr, 3620 args->in.interrupt_timeout_us, args->in.cq_counters_handle, 3621 args->in.cq_counters_offset, 3622 args->in.target, interrupt, 3623 !!(args->in.flags & HL_WAIT_CS_FLAGS_REGISTER_INTERRUPT), 3624 args->in.timestamp_handle, args->in.timestamp_offset, 3625 &status, ×tamp); 3626 else 3627 rc = _hl_interrupt_wait_ioctl_user_addr(hdev, hpriv->ctx, 3628 args->in.interrupt_timeout_us, args->in.addr, 3629 args->in.target, interrupt, &status, 3630 ×tamp); 3631 if (rc) 3632 return rc; 3633 3634 memset(args, 0, sizeof(*args)); 3635 args->out.status = status; 3636 3637 if (timestamp) { 3638 args->out.timestamp_nsec = timestamp; 3639 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD; 3640 } 3641 3642 return 0; 3643 } 3644 3645 int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data) 3646 { 3647 struct hl_device *hdev = hpriv->hdev; 3648 union hl_wait_cs_args *args = data; 3649 u32 flags = args->in.flags; 3650 int rc; 3651 3652 /* If the device is not operational, or if an error has happened and user should release the 3653 * device, there is no point in waiting for any command submission or user interrupt. 
 */
	if (!hl_device_operational(hpriv->hdev, NULL) || hdev->reset_info.watchdog_active)
		return -EBUSY;

	if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
		rc = hl_interrupt_wait_ioctl(hpriv, data);
	else if (flags & HL_WAIT_CS_FLAGS_MULTI_CS)
		rc = hl_multi_cs_wait_ioctl(hpriv, data);
	else
		rc = hl_cs_wait_ioctl(hpriv, data);

	return rc;
}
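/*
 * Usage sketch (illustrative, not compiled): how the three wait flavors
 * dispatched by hl_wait_ioctl() above are selected by user space through
 * union hl_wait_cs_args. The exact ioctl request macro is defined in the
 * uapi header and is not spelled out here; interrupt_id_in_mask stands for
 * the interrupt ID encoded into HL_WAIT_CS_FLAGS_INTERRUPT_MASK.
 *
 *	union hl_wait_cs_args args = {};
 *
 *	// 1. Wait on a single CS handle
 *	args.in.seq = cs_seq;
 *	args.in.timeout_us = 1000000;
 *
 *	// 2. Multi-CS wait: 'seq' points to an array of CS handles
 *	args.in.seq = (__u64) (uintptr_t) seq_array;
 *	args.in.seq_arr_len = n_seqs;
 *	args.in.flags = HL_WAIT_CS_FLAGS_MULTI_CS;
 *
 *	// 3. Interrupt-based wait on a user address reaching a target value
 *	args.in.addr = poll_addr;
 *	args.in.target = target_value;
 *	args.in.interrupt_timeout_us = 1000000;
 *	args.in.flags = HL_WAIT_CS_FLAGS_INTERRUPT | interrupt_id_in_mask;
 *
 * On return, args.out.status reports busy/completed/timed-out/aborted and,
 * when valid, args.out.timestamp_nsec carries the completion timestamp
 * (HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD set in args.out.flags).
 */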