// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"
#include "mes_api_def.h"
#include "kfd_debug.h"

/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)

static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
				  u32 pasid, unsigned int vmid);

static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param,
				uint32_t grace_period);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
			      enum kfd_unmap_queues_filter filter,
			      uint32_t filter_param,
			      uint32_t grace_period,
			      bool reset);

static int map_queues_cpsch(struct device_queue_manager *dqm);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				  struct queue *q);

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				  struct queue *q);
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
static int allocate_sdma_queue(struct device_queue_manager *dqm,
			       struct queue *q, const uint32_t *restore_sdma_id);
static void kfd_process_hw_exception(struct work_struct *work);

static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}

static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
	int i;
	int pipe_offset = (mec * dqm->dev->kfd->shared_resources.num_pipe_per_mec
			   + pipe) * dqm->dev->kfd->shared_resources.num_queue_per_pipe;

	/* queue is available for KFD usage if bit is 1 */
	for (i = 0; i < dqm->dev->kfd->shared_resources.num_queue_per_pipe; ++i)
		if (test_bit(pipe_offset + i,
			     dqm->dev->kfd->shared_resources.cp_queue_bitmap))
			return true;
	return false;
}

unsigned int
get_cp_queues_num(struct device_queue_manager *dqm)
{
	return bitmap_weight(dqm->dev->kfd->shared_resources.cp_queue_bitmap,
			     KGD_MAX_QUEUES);
}

unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
	return dqm->dev->kfd->shared_resources.num_queue_per_pipe;
}

unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
	return dqm->dev->kfd->shared_resources.num_pipe_per_mec;
}

static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
{
	return kfd_get_num_sdma_engines(dqm->dev) +
		kfd_get_num_xgmi_sdma_engines(dqm->dev);
}

unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{
	return kfd_get_num_sdma_engines(dqm->dev) *
		dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
}

unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
{
	return kfd_get_num_xgmi_sdma_engines(dqm->dev) *
		dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
}

static void init_sdma_bitmaps(struct device_queue_manager *dqm)
{
	bitmap_zero(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES);
	bitmap_set(dqm->sdma_bitmap, 0, get_num_sdma_queues(dqm));

	bitmap_zero(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES);
	bitmap_set(dqm->xgmi_sdma_bitmap, 0, get_num_xgmi_sdma_queues(dqm));

	/* Mask out the reserved queues */
	bitmap_andnot(dqm->sdma_bitmap, dqm->sdma_bitmap,
		      dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap,
		      KFD_MAX_SDMA_QUEUES);
}

void program_sh_mem_settings(struct device_queue_manager *dqm,
			     struct qcm_process_device *qpd)
{
	uint32_t xcc_mask = dqm->dev->xcc_mask;
	int xcc_id;

	for_each_inst(xcc_id, xcc_mask)
		dqm->dev->kfd2kgd->program_sh_mem_settings(
			dqm->dev->adev, qpd->vmid, qpd->sh_mem_config,
			qpd->sh_mem_ape1_base, qpd->sh_mem_ape1_limit,
			qpd->sh_mem_bases, xcc_id);
}

static void kfd_hws_hang(struct device_queue_manager *dqm)
{
	/*
	 * Issue a GPU reset if HWS is unresponsive
	 */
	dqm->is_hws_hang = true;

	/* It's possible we're detecting a HWS hang in the
	 * middle of a GPU reset. No need to schedule another
	 * reset in this case.
	 */
	if (!dqm->is_resetting)
		schedule_work(&dqm->hw_exception_work);
}

static int convert_to_mes_queue_type(int queue_type)
{
	int mes_queue_type;

	switch (queue_type) {
	case KFD_QUEUE_TYPE_COMPUTE:
		mes_queue_type = MES_QUEUE_TYPE_COMPUTE;
		break;
	case KFD_QUEUE_TYPE_SDMA:
		mes_queue_type = MES_QUEUE_TYPE_SDMA;
		break;
	default:
		WARN(1, "Invalid queue type %d", queue_type);
		mes_queue_type = -EINVAL;
		break;
	}

	return mes_queue_type;
}

static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
			 struct qcm_process_device *qpd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
	struct kfd_process_device *pdd = qpd_to_pdd(qpd);
	struct mes_add_queue_input queue_input;
	int r, queue_type;
	uint64_t wptr_addr_off;

	if (dqm->is_hws_hang)
		return -EIO;

	memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
	queue_input.process_id = qpd->pqm->process->pasid;
	queue_input.page_table_base_addr = qpd->page_table_base;
	queue_input.process_va_start = 0;
	queue_input.process_va_end = adev->vm_manager.max_pfn - 1;
	/* MES unit for quantum is 100ns */
	queue_input.process_quantum = KFD_MES_PROCESS_QUANTUM; /* Equivalent to 10ms. */
	queue_input.process_context_addr = pdd->proc_ctx_gpu_addr;
	queue_input.gang_quantum = KFD_MES_GANG_QUANTUM; /* Equivalent to 1ms */
	queue_input.gang_context_addr = q->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = q->properties.priority;
	queue_input.gang_global_priority_level =
		AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	queue_input.doorbell_offset = q->properties.doorbell_off;
	queue_input.mqd_addr = q->gart_mqd_addr;
	queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;

	if (q->wptr_bo) {
		wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
		queue_input.wptr_mc_addr = amdgpu_bo_gpu_offset(q->wptr_bo) + wptr_addr_off;
	}

	queue_input.is_kfd_process = 1;
	queue_input.is_aql_queue = (q->properties.format == KFD_QUEUE_FORMAT_AQL);
	queue_input.queue_size = q->properties.queue_size >> 2;

	queue_input.paging = false;
	queue_input.tba_addr = qpd->tba_addr;
	queue_input.tma_addr = qpd->tma_addr;
	queue_input.trap_en = !kfd_dbg_has_cwsr_workaround(q->device);
	queue_input.skip_process_ctx_clear = qpd->pqm->process->debug_trap_enabled ||
					     kfd_dbg_has_ttmps_always_setup(q->device);

	queue_type = convert_to_mes_queue_type(q->properties.type);
	if (queue_type < 0) {
		pr_err("Queue type not supported with MES, queue:%d\n",
		       q->properties.type);
		return -EINVAL;
	}
	queue_input.queue_type = (uint32_t)queue_type;

	queue_input.exclusively_scheduled = q->properties.is_gws;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);
	if (r) {
		pr_err("failed to add hardware queue to MES, doorbell=0x%x\n",
		       q->properties.doorbell_off);
		pr_err("MES might be in unrecoverable state, issue a GPU reset\n");
		kfd_hws_hang(dqm);
	}

	return r;
}

static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q,
			    struct qcm_process_device *qpd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
	int r;
	struct mes_remove_queue_input queue_input;

	if (dqm->is_hws_hang)
		return -EIO;

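	/* MES identifies the queue to remove by its doorbell offset and
	 * gang context address, filled in below.
	 */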
	memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
	queue_input.doorbell_offset = q->properties.doorbell_off;
	queue_input.gang_context_addr = q->gang_ctx_gpu_addr;

	amdgpu_mes_lock(&adev->mes);
	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	amdgpu_mes_unlock(&adev->mes);

	if (r) {
		pr_err("failed to remove hardware queue from MES, doorbell=0x%x\n",
		       q->properties.doorbell_off);
		pr_err("MES might be in unrecoverable state, issue a GPU reset\n");
		kfd_hws_hang(dqm);
	}

	return r;
}

static int remove_all_queues_mes(struct device_queue_manager *dqm)
{
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;
	int retval = 0;

	list_for_each_entry(cur, &dqm->queues, list) {
		qpd = cur->qpd;
		list_for_each_entry(q, &qpd->queues_list, list) {
			if (q->properties.is_active) {
				retval = remove_queue_mes(dqm, q, qpd);
				if (retval) {
					pr_err("%s: Failed to remove queue %d for dev %d",
					       __func__,
					       q->properties.queue_id,
					       dqm->dev->id);
					return retval;
				}
			}
		}
	}

	return retval;
}

static void increment_queue_count(struct device_queue_manager *dqm,
				  struct qcm_process_device *qpd,
				  struct queue *q)
{
	dqm->active_queue_count++;
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
	    q->properties.type == KFD_QUEUE_TYPE_DIQ)
		dqm->active_cp_queue_count++;

	if (q->properties.is_gws) {
		dqm->gws_queue_count++;
		qpd->mapped_gws_queue = true;
	}
}

static void decrement_queue_count(struct device_queue_manager *dqm,
				  struct qcm_process_device *qpd,
				  struct queue *q)
{
	dqm->active_queue_count--;
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
	    q->properties.type == KFD_QUEUE_TYPE_DIQ)
		dqm->active_cp_queue_count--;

	if (q->properties.is_gws) {
		dqm->gws_queue_count--;
		qpd->mapped_gws_queue = false;
	}
}

/*
 * Allocate a doorbell ID to this queue.
 * If doorbell_id is passed in, make sure requested ID is valid then allocate it.
 */
static int allocate_doorbell(struct qcm_process_device *qpd,
			     struct queue *q,
			     uint32_t const *restore_id)
{
	struct kfd_node *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev)) {
		/* On pre-SOC15 chips we need to use the queue ID to
		 * preserve the user mode ABI.
		 */

		if (restore_id && *restore_id != q->properties.queue_id)
			return -EINVAL;

		q->doorbell_id = q->properties.queue_id;
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		   q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		/* For SDMA queues on SOC15 with 8-byte doorbell, use static
		 * doorbell assignments based on the engine and queue id.
		 * The doorbell index distance between RLC (2*i) and (2*i+1)
		 * for a SDMA engine is 512.
		 */

		uint32_t *idx_offset = dev->kfd->shared_resources.sdma_doorbell_idx;

		/*
		 * q->properties.sdma_engine_id corresponds to the virtual
		 * sdma engine number. However, for doorbell allocation,
		 * we need the physical sdma engine id in order to get the
		 * correct doorbell offset.
		 */
		uint32_t valid_id = idx_offset[qpd->dqm->dev->node_id *
					       get_num_all_sdma_engines(qpd->dqm) +
					       q->properties.sdma_engine_id]
				    + (q->properties.sdma_queue_id & 1)
				    * KFD_QUEUE_DOORBELL_MIRROR_OFFSET
				    + (q->properties.sdma_queue_id >> 1);

		if (restore_id && *restore_id != valid_id)
			return -EINVAL;
		q->doorbell_id = valid_id;
	} else {
		/* For CP queues on SOC15 */
		if (restore_id) {
			/* make sure that ID is free */
			if (__test_and_set_bit(*restore_id, qpd->doorbell_bitmap))
				return -EINVAL;

			q->doorbell_id = *restore_id;
		} else {
			/* or reserve a free doorbell ID */
			unsigned int found;

			found = find_first_zero_bit(qpd->doorbell_bitmap,
						    KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
			if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
				pr_debug("No doorbells available");
				return -EBUSY;
			}
			set_bit(found, qpd->doorbell_bitmap);
			q->doorbell_id = found;
		}
	}

	q->properties.doorbell_off = amdgpu_doorbell_index_on_bar(dev->adev,
								  qpd->proc_doorbells,
								  q->doorbell_id);
	return 0;
}

static void deallocate_doorbell(struct qcm_process_device *qpd,
				struct queue *q)
{
	unsigned int old;
	struct kfd_node *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev) ||
	    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
	    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		return;

	old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
	WARN_ON(!old);
}

static void program_trap_handler_settings(struct device_queue_manager *dqm,
					  struct qcm_process_device *qpd)
{
	uint32_t xcc_mask = dqm->dev->xcc_mask;
	int xcc_id;

	if (dqm->dev->kfd2kgd->program_trap_handler_settings)
		for_each_inst(xcc_id, xcc_mask)
			dqm->dev->kfd2kgd->program_trap_handler_settings(
				dqm->dev->adev, qpd->vmid, qpd->tba_addr,
				qpd->tma_addr, xcc_id);
}

static int allocate_vmid(struct device_queue_manager *dqm,
			 struct qcm_process_device *qpd,
			 struct queue *q)
{
	int allocated_vmid = -1, i;

	for (i = dqm->dev->vm_info.first_vmid_kfd;
	     i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
		if (!dqm->vmid_pasid[i]) {
			allocated_vmid = i;
			break;
		}
	}

	if (allocated_vmid < 0) {
		pr_err("no more vmid to allocate\n");
		return -ENOSPC;
	}

	pr_debug("vmid allocated: %d\n", allocated_vmid);

	dqm->vmid_pasid[allocated_vmid] = q->process->pasid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);

	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	program_sh_mem_settings(dqm, qpd);

	if (KFD_IS_SOC15(dqm->dev) && dqm->dev->kfd->cwsr_enabled)
		program_trap_handler_settings(dqm, qpd);

	/* qpd->page_table_base is set earlier when register_process()
	 * is called, i.e. when the first queue is created.
	 */
	dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->adev,
							  qpd->vmid,
							  qpd->page_table_base);
	/* invalidate the VM context after pasid and vmid mapping is set up */
	kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);

	if (dqm->dev->kfd2kgd->set_scratch_backing_va)
		dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->adev,
				qpd->sh_hidden_private_base, qpd->vmid);

	return 0;
}

static int flush_texture_cache_nocpsch(struct kfd_node *kdev,
				       struct qcm_process_device *qpd)
{
	const struct packet_manager_funcs *pmf = qpd->dqm->packet_mgr.pmf;
	int ret;

	if (!qpd->ib_kaddr)
		return -ENOMEM;

	ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
	if (ret)
		return ret;

	return amdgpu_amdkfd_submit_ib(kdev->adev, KGD_ENGINE_MEC1, qpd->vmid,
				       qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
				       pmf->release_mem_size / sizeof(uint32_t));
}

static void deallocate_vmid(struct device_queue_manager *dqm,
			    struct qcm_process_device *qpd,
			    struct queue *q)
{
	/* On GFX v7, CP doesn't flush TC at dequeue */
	if (q->device->adev->asic_type == CHIP_HAWAII)
		if (flush_texture_cache_nocpsch(q->device, qpd))
			pr_err("Failed to flush TC\n");

	kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
	dqm->vmid_pasid[qpd->vmid] = 0;

	qpd->vmid = 0;
	q->properties.vmid = 0;
}

static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd,
				const struct kfd_criu_queue_priv_data *qd,
				const void *restore_mqd, const void *restore_ctl_stack)
{
	struct mqd_manager *mqd_mgr;
	int retval;

	dqm_lock(dqm);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
			dqm->total_queue_count);
		retval = -EPERM;
		goto out_unlock;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval)
			goto out_unlock;
	}
	q->properties.vmid = qpd->vmid;
	/*
	 * Eviction state logic: mark all queues as evicted, even ones
	 * not currently active. Restoring inactive queues later only
	 * updates the is_evicted flag but is a no-op otherwise.
	 */
	q->properties.is_evicted = !!qpd->evicted;

	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		retval = allocate_hqd(dqm, q);
		if (retval)
			goto deallocate_vmid;
		pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
			 q->pipe, q->queue);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		   q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
		if (retval)
			goto deallocate_vmid;
		dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	}

	retval = allocate_doorbell(qpd, q, qd ?
				   &qd->doorbell_id : NULL);
	if (retval)
		goto out_deallocate_hqd;

	/* Temporarily release dqm lock to avoid a circular lock dependency */
	dqm_unlock(dqm);
	q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
	dqm_lock(dqm);

	if (!q->mqd_mem_obj) {
		retval = -ENOMEM;
		goto out_deallocate_doorbell;
	}

	if (qd)
		mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
				     &q->properties, restore_mqd, restore_ctl_stack,
				     qd->ctl_stack_size);
	else
		mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
				  &q->gart_mqd_addr, &q->properties);

	if (q->properties.is_active) {
		if (!dqm->sched_running) {
			WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
			goto add_queue_to_list;
		}

		if (WARN(q->process->mm != current->mm,
			 "should only run in user thread"))
			retval = -EFAULT;
		else
			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
						   q->queue, &q->properties, current->mm);
		if (retval)
			goto out_free_mqd;
	}

add_queue_to_list:
	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;
	if (q->properties.is_active)
		increment_queue_count(dqm, qpd, q);

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
		 dqm->total_queue_count);
	goto out_unlock;

out_free_mqd:
	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_hqd:
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		deallocate_hqd(dqm, q);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		deallocate_sdma_queue(dqm, q);
deallocate_vmid:
	if (list_empty(&qpd->queues_list))
		deallocate_vmid(dqm, qpd, q);
out_unlock:
	dqm_unlock(dqm);
	return retval;
}

static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0;
	     i < get_pipes_per_mec(dqm);
	     pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {

		if (!is_pipe_enabled(dqm, 0, pipe))
			continue;

		if (dqm->allocated_queues[pipe] != 0) {
			bit = ffs(dqm->allocated_queues[pipe]) - 1;
			dqm->allocated_queues[pipe] &= ~(1 << bit);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);

	return 0;
}

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				  struct queue *q)
{
	dqm->allocated_queues[q->pipe] |= (1 << q->queue);
}

#define SQ_IND_CMD_CMD_KILL 0x00000003
#define SQ_IND_CMD_MODE_BROADCAST 0x00000001

static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process *p)
{
	int status = 0;
	unsigned int vmid;
	uint16_t queried_pasid;
	union SQ_CMD_BITS reg_sq_cmd;
	union GRBM_GFX_INDEX_BITS reg_gfx_index;
	struct kfd_process_device *pdd;
	int first_vmid_to_scan = dev->vm_info.first_vmid_kfd;
	int last_vmid_to_scan = dev->vm_info.last_vmid_kfd;
	uint32_t xcc_mask = dev->xcc_mask;
	int xcc_id;

	reg_sq_cmd.u32All = 0;
	reg_gfx_index.u32All = 0;

	pr_debug("Killing all process wavefronts\n");

	if (!dev->kfd2kgd->get_atc_vmid_pasid_mapping_info) {
		pr_err("no vmid pasid mapping supported\n");
		return -EOPNOTSUPP;
	}

	/* Scan all registers in the range ATC_VMID8_PASID_MAPPING ..
	 * ATC_VMID15_PASID_MAPPING
	 * to check which VMID the current process is mapped to.
	 */

	for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
		status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info
				(dev->adev, vmid, &queried_pasid);

		if (status && queried_pasid == p->pasid) {
			pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n",
				 vmid, p->pasid);
			break;
		}
	}

	if (vmid > last_vmid_to_scan) {
		pr_err("Didn't find vmid for pasid 0x%x\n", p->pasid);
		return -EFAULT;
	}

	/* take the VMID for that process the safe way, using the PDD */
	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd)
		return -EFAULT;

	reg_gfx_index.bits.sh_broadcast_writes = 1;
	reg_gfx_index.bits.se_broadcast_writes = 1;
	reg_gfx_index.bits.instance_broadcast_writes = 1;
	reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_BROADCAST;
	reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL;
	reg_sq_cmd.bits.vm_id = vmid;

	for_each_inst(xcc_id, xcc_mask)
		dev->kfd2kgd->wave_control_execute(
			dev->adev, reg_gfx_index.u32All,
			reg_sq_cmd.u32All, xcc_id);

	return 0;
}

/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 * to avoid unsynchronized access
 */
static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd,
					struct queue *q)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		deallocate_hqd(dqm, q);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		deallocate_sdma_queue(dqm, q);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		deallocate_sdma_queue(dqm, q);
	else {
		pr_debug("q->properties.type %d is invalid\n",
			 q->properties.type);
		return -EINVAL;
	}
	dqm->total_queue_count--;

	deallocate_doorbell(qpd, q);

	if (!dqm->sched_running) {
		WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
		return 0;
	}

	retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				      KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				      KFD_UNMAP_LATENCY_MS,
				      q->pipe, q->queue);
	if (retval == -ETIME)
		qpd->reset_wavefronts = true;

	list_del(&q->list);
	if (list_empty(&qpd->queues_list)) {
		if (qpd->reset_wavefronts) {
			pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
				dqm->dev);
			/* dbgdev_wave_reset_wavefronts has to be called before
			 * deallocate_vmid(), i.e. when vmid is still in use.
			 */
			dbgdev_wave_reset_wavefronts(dqm->dev,
						     qpd->pqm->process);
			qpd->reset_wavefronts = false;
		}

		deallocate_vmid(dqm, qpd, q);
	}
	qpd->queue_count--;
	if (q->properties.is_active)
		decrement_queue_count(dqm, qpd, q);

	return retval;
}

static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				 struct qcm_process_device *qpd,
				 struct queue *q)
{
	int retval;
	uint64_t sdma_val = 0;
	struct kfd_process_device *pdd = qpd_to_pdd(qpd);
	struct mqd_manager *mqd_mgr =
		dqm->mqd_mgrs[get_mqd_type_from_queue_type(q->properties.type)];

	/* Get the SDMA queue stats */
	if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
	    (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
		retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
						 &sdma_val);
		if (retval)
			pr_err("Failed to read SDMA queue counter for queue: %d\n",
			       q->properties.queue_id);
	}

	dqm_lock(dqm);
	retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
	if (!retval)
		pdd->sdma_past_activity_counter += sdma_val;
	dqm_unlock(dqm);

	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	return retval;
}

static int update_queue(struct device_queue_manager *dqm, struct queue *q,
			struct mqd_update_info *minfo)
{
	int retval = 0;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	bool prev_active = false;

	dqm_lock(dqm);
	pdd = kfd_get_process_device_data(q->device, q->process);
	if (!pdd) {
		retval = -ENODEV;
		goto out_unlock;
	}
	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	/* Save previous activity state for counters */
	prev_active = q->properties.is_active;

	/* Make sure the queue is unmapped before updating the MQD */
	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
		if (!dqm->dev->kfd->shared_resources.enable_mes)
			retval = unmap_queues_cpsch(dqm,
						    KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
						    USE_DEFAULT_GRACE_PERIOD, false);
		else if (prev_active)
			retval = remove_queue_mes(dqm, q, &pdd->qpd);

		if (retval) {
			pr_err("unmap queue failed\n");
			goto out_unlock;
		}
	} else if (prev_active &&
		   (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {

		if (!dqm->sched_running) {
			WARN_ONCE(1, "Update non-HWS queue while stopped\n");
			goto out_unlock;
		}

		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
					      (dqm->dev->kfd->cwsr_enabled ?
					       KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
					       KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
					      KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval) {
			pr_err("destroy mqd failed\n");
			goto out_unlock;
		}
	}

	mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties, minfo);

	/*
	 * check active state vs. the previous state and modify
	 * counter accordingly. map_queues_cpsch uses the
	 * dqm->active_queue_count to determine whether a new runlist must be
	 * uploaded.
	 */
	if (q->properties.is_active && !prev_active) {
		increment_queue_count(dqm, &pdd->qpd, q);
	} else if (!q->properties.is_active && prev_active) {
		decrement_queue_count(dqm, &pdd->qpd, q);
	} else if (q->gws && !q->properties.is_gws) {
		if (q->properties.is_active) {
			dqm->gws_queue_count++;
			pdd->qpd.mapped_gws_queue = true;
		}
		q->properties.is_gws = true;
	} else if (!q->gws && q->properties.is_gws) {
		if (q->properties.is_active) {
			dqm->gws_queue_count--;
			pdd->qpd.mapped_gws_queue = false;
		}
		q->properties.is_gws = false;
	}

	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
		if (!dqm->dev->kfd->shared_resources.enable_mes)
			retval = map_queues_cpsch(dqm);
		else if (q->properties.is_active)
			retval = add_queue_mes(dqm, q, &pdd->qpd);
	} else if (q->properties.is_active &&
		   (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
		if (WARN(q->process->mm != current->mm,
			 "should only run in user thread"))
			retval = -EFAULT;
		else
			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
						   q->pipe, q->queue,
						   &q->properties, current->mm);
	}

out_unlock:
	dqm_unlock(dqm);
	return retval;
}

/* suspend_single_queue does not lock the dqm like the
 * evict_process_queues_cpsch or evict_process_queues_nocpsch. You should
 * lock the dqm before calling, and unlock after calling.
 *
 * The reason we don't lock the dqm is because this function may be
 * called on multiple queues in a loop, so rather than locking/unlocking
 * multiple times, we will just keep the dqm locked for all of the calls.
 */
static int suspend_single_queue(struct device_queue_manager *dqm,
				struct kfd_process_device *pdd,
				struct queue *q)
{
	bool is_new;

	if (q->properties.is_suspended)
		return 0;

	pr_debug("Suspending PASID %u queue [%i]\n",
		 pdd->process->pasid,
		 q->properties.queue_id);

	is_new = q->properties.exception_status & KFD_EC_MASK(EC_QUEUE_NEW);

	if (is_new || q->properties.is_being_destroyed) {
		pr_debug("Suspend: skip %s queue id %i\n",
			 is_new ? "new" : "destroyed",
			 q->properties.queue_id);
		return -EBUSY;
	}

	q->properties.is_suspended = true;
	if (q->properties.is_active) {
		if (dqm->dev->kfd->shared_resources.enable_mes) {
			int r = remove_queue_mes(dqm, q, &pdd->qpd);

			if (r)
				return r;
		}

		decrement_queue_count(dqm, &pdd->qpd, q);
		q->properties.is_active = false;
	}

	return 0;
}

/* resume_single_queue does not lock the dqm like the functions
 * restore_process_queues_cpsch or restore_process_queues_nocpsch. You should
 * lock the dqm before calling, and unlock after calling.
 *
 * The reason we don't lock the dqm is because this function may be
 * called on multiple queues in a loop, so rather than locking/unlocking
 * multiple times, we will just keep the dqm locked for all of the calls.
 */
static int resume_single_queue(struct device_queue_manager *dqm,
			       struct qcm_process_device *qpd,
			       struct queue *q)
{
	struct kfd_process_device *pdd;

	if (!q->properties.is_suspended)
		return 0;

	pdd = qpd_to_pdd(qpd);

	pr_debug("Restoring from suspend PASID %u queue [%i]\n",
		 pdd->process->pasid,
		 q->properties.queue_id);

	q->properties.is_suspended = false;

	if (QUEUE_IS_ACTIVE(q->properties)) {
		if (dqm->dev->kfd->shared_resources.enable_mes) {
			int r = add_queue_mes(dqm, q, &pdd->qpd);

			if (r)
				return r;
		}

		q->properties.is_active = true;
		increment_queue_count(dqm, qpd, q);
	}

	return 0;
}

static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	int retval, ret = 0;

	dqm_lock(dqm);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);
	pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
			     pdd->process->pasid);

	pdd->last_evict_timestamp = get_jiffies_64();
	/* Mark all queues as evicted. Deactivate all active queues on
	 * the qpd.
	 */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = true;
		if (!q->properties.is_active)
			continue;

		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		q->properties.is_active = false;
		decrement_queue_count(dqm, qpd, q);

		if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
			continue;

		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
					      (dqm->dev->kfd->cwsr_enabled ?
					       KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
					       KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
					      KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval && !ret)
			/* Return the first error, but keep going to
			 * maintain a consistent eviction state
			 */
			ret = retval;
	}

out:
	dqm_unlock(dqm);
	return ret;
}

static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
				      struct qcm_process_device *qpd)
{
	struct queue *q;
	struct kfd_process_device *pdd;
	int retval = 0;

	dqm_lock(dqm);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);

	/* The debugger creates processes that temporarily have not acquired
	 * all VMs for all devices and has no VMs itself.
	 * Skip queue eviction on process eviction.
	 */
	if (!pdd->drm_priv)
		goto out;

	pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
			     pdd->process->pasid);

	/* Mark all queues as evicted. Deactivate all active queues on
	 * the qpd.
	 */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = true;
		if (!q->properties.is_active)
			continue;

		q->properties.is_active = false;
		decrement_queue_count(dqm, qpd, q);

		if (dqm->dev->kfd->shared_resources.enable_mes) {
			retval = remove_queue_mes(dqm, q, qpd);
			if (retval) {
				pr_err("Failed to evict queue %d\n",
				       q->properties.queue_id);
				goto out;
			}
		}
	}
	pdd->last_evict_timestamp = get_jiffies_64();
	if (!dqm->dev->kfd->shared_resources.enable_mes)
		retval = execute_queues_cpsch(dqm,
					      qpd->is_debug ?
					      KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
					      KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
					      USE_DEFAULT_GRACE_PERIOD);

out:
	dqm_unlock(dqm);
	return retval;
}

static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
					  struct qcm_process_device *qpd)
{
	struct mm_struct *mm = NULL;
	struct queue *q;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	uint64_t eviction_duration;
	int retval, ret = 0;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);

	dqm_lock(dqm);
	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
			     pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

	if (!list_empty(&qpd->queues_list)) {
		dqm->dev->kfd2kgd->set_vm_context_page_table_base(
			dqm->dev->adev,
			qpd->vmid,
			qpd->page_table_base);
		kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
	}

	/* Take a safe reference to the mm_struct, which may otherwise
	 * disappear even while the kfd_process is still referenced.
	 */
	mm = get_task_mm(pdd->process->lead_thread);
	if (!mm) {
		ret = -EFAULT;
		goto out;
	}

	/* Remove the eviction flags. Activate queues that are not
	 * inactive for other reasons.
	 */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = false;
		if (!QUEUE_IS_ACTIVE(q->properties))
			continue;

		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		q->properties.is_active = true;
		increment_queue_count(dqm, qpd, q);

		if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
			continue;

		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
					   q->queue, &q->properties, mm);
		if (retval && !ret)
			/* Return the first error, but keep going to
			 * maintain a consistent eviction state
			 */
			ret = retval;
	}
	qpd->evicted = 0;
	eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
	atomic64_add(eviction_duration, &pdd->evict_duration_counter);
out:
	if (mm)
		mmput(mm);
	dqm_unlock(dqm);
	return ret;
}

static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct kfd_process_device *pdd;
	uint64_t eviction_duration;
	int retval = 0;

	pdd = qpd_to_pdd(qpd);

	dqm_lock(dqm);
	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	/* The debugger creates processes that temporarily have not acquired
	 * all VMs for all devices and has no VMs itself.
	 * Skip queue restore on process restore.
	 */
	if (!pdd->drm_priv)
		goto vm_not_acquired;

	pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
			     pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
	pr_debug("Updated PD address to 0x%llx\n", qpd->page_table_base);

	/* activate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = false;
		if (!QUEUE_IS_ACTIVE(q->properties))
			continue;

		q->properties.is_active = true;
		increment_queue_count(dqm, &pdd->qpd, q);

		if (dqm->dev->kfd->shared_resources.enable_mes) {
			retval = add_queue_mes(dqm, q, qpd);
			if (retval) {
				pr_err("Failed to restore queue %d\n",
				       q->properties.queue_id);
				goto out;
			}
		}
	}
	if (!dqm->dev->kfd->shared_resources.enable_mes)
		retval = execute_queues_cpsch(dqm,
					      KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
					      USE_DEFAULT_GRACE_PERIOD);
	eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
	atomic64_add(eviction_duration, &pdd->evict_duration_counter);
vm_not_acquired:
	qpd->evicted = 0;
out:
	dqm_unlock(dqm);
	return retval;
}

static int register_process(struct device_queue_manager *dqm,
			    struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	int retval;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);

	dqm_lock(dqm);
	list_add(&n->list, &dqm->queues);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

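	/* Let the ASIC-specific DQM code update its per-process (qpd) settings */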
	retval = dqm->asic_ops.update_qpd(dqm, qpd);

	dqm->processes_count++;

	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	kfd_inc_compute_active(dqm->dev);

	return retval;
}

static int unregister_process(struct device_queue_manager *dqm,
			      struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	pr_debug("qpd->queues_list is %s\n",
		 list_empty(&qpd->queues_list) ? "empty" : "not empty");

	retval = 0;
	dqm_lock(dqm);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	if (!retval)
		kfd_dec_compute_active(dqm->dev);

	return retval;
}

static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid,
		       unsigned int vmid)
{
	uint32_t xcc_mask = dqm->dev->xcc_mask;
	int xcc_id, ret;

	for_each_inst(xcc_id, xcc_mask) {
		ret = dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
			dqm->dev->adev, pasid, vmid, xcc_id);
		if (ret)
			break;
	}

	return ret;
}

static void init_interrupts(struct device_queue_manager *dqm)
{
	uint32_t xcc_mask = dqm->dev->xcc_mask;
	unsigned int i, xcc_id;

	for_each_inst(xcc_id, xcc_mask) {
		for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) {
			if (is_pipe_enabled(dqm, 0, i)) {
				dqm->dev->kfd2kgd->init_interrupts(
					dqm->dev->adev, i, xcc_id);
			}
		}
	}
}

static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int pipe, queue;

	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues)
		return -ENOMEM;

	mutex_init(&dqm->lock_hidden);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->active_cp_queue_count = 0;
	dqm->gws_queue_count = 0;

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
			if (test_bit(pipe_offset + queue,
				     dqm->dev->kfd->shared_resources.cp_queue_bitmap))
				dqm->allocated_queues[pipe] |= 1 << queue;
	}

	memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));

	init_sdma_bitmaps(dqm);

	return 0;
}

static void uninitialize(struct device_queue_manager *dqm)
{
	int i;

	WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqd_mgrs[i]);
	mutex_destroy(&dqm->lock_hidden);
}

static int start_nocpsch(struct device_queue_manager *dqm)
{
	int r = 0;

	pr_info("SW scheduler is used");
	init_interrupts(dqm);

	if (dqm->dev->adev->asic_type == CHIP_HAWAII)
		r = pm_init(&dqm->packet_mgr, dqm);
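	/* Hawaii still needs the packet manager in non-HWS mode, e.g. for the
	 * release_mem packet used by flush_texture_cache_nocpsch().
	 */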
	if (!r)
		dqm->sched_running = true;

	return r;
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	dqm_lock(dqm);
	if (!dqm->sched_running) {
		dqm_unlock(dqm);
		return 0;
	}

	if (dqm->dev->adev->asic_type == CHIP_HAWAII)
		pm_uninit(&dqm->packet_mgr, false);
	dqm->sched_running = false;
	dqm_unlock(dqm);

	return 0;
}

static void pre_reset(struct device_queue_manager *dqm)
{
	dqm_lock(dqm);
	dqm->is_resetting = true;
	dqm_unlock(dqm);
}

static int allocate_sdma_queue(struct device_queue_manager *dqm,
			       struct queue *q, const uint32_t *restore_sdma_id)
{
	int bit;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		if (bitmap_empty(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
			pr_err("No more SDMA queue to allocate\n");
			return -ENOMEM;
		}

		if (restore_sdma_id) {
			/* Re-use existing sdma_id */
			if (!test_bit(*restore_sdma_id, dqm->sdma_bitmap)) {
				pr_err("SDMA queue already in use\n");
				return -EBUSY;
			}
			clear_bit(*restore_sdma_id, dqm->sdma_bitmap);
			q->sdma_id = *restore_sdma_id;
		} else {
			/* Find first available sdma_id */
			bit = find_first_bit(dqm->sdma_bitmap,
					     get_num_sdma_queues(dqm));
			clear_bit(bit, dqm->sdma_bitmap);
			q->sdma_id = bit;
		}

		q->properties.sdma_engine_id =
			q->sdma_id % kfd_get_num_sdma_engines(dqm->dev);
		q->properties.sdma_queue_id = q->sdma_id /
				kfd_get_num_sdma_engines(dqm->dev);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		if (bitmap_empty(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
			pr_err("No more XGMI SDMA queue to allocate\n");
			return -ENOMEM;
		}
		if (restore_sdma_id) {
			/* Re-use existing sdma_id */
			if (!test_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap)) {
				pr_err("SDMA queue already in use\n");
				return -EBUSY;
			}
			clear_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap);
			q->sdma_id = *restore_sdma_id;
		} else {
			bit = find_first_bit(dqm->xgmi_sdma_bitmap,
					     get_num_xgmi_sdma_queues(dqm));
			clear_bit(bit, dqm->xgmi_sdma_bitmap);
			q->sdma_id = bit;
		}
		/* sdma_engine_id is sdma id including
		 * both PCIe-optimized SDMAs and XGMI-
		 * optimized SDMAs.
		 * The calculation below
		 * assumes the first N engines are always
		 * PCIe-optimized ones
		 */
		q->properties.sdma_engine_id =
			kfd_get_num_sdma_engines(dqm->dev) +
			q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev);
		q->properties.sdma_queue_id = q->sdma_id /
			kfd_get_num_xgmi_sdma_engines(dqm->dev);
	}

	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
	pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);

	return 0;
}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				  struct queue *q)
{
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		if (q->sdma_id >= get_num_sdma_queues(dqm))
			return;
		set_bit(q->sdma_id, dqm->sdma_bitmap);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
			return;
		set_bit(q->sdma_id, dqm->xgmi_sdma_bitmap);
	}
}

/*
 * Device Queue Manager implementation for cp scheduler
 */

static int set_sched_resources(struct device_queue_manager *dqm)
{
	int i, mec;
	struct scheduling_resources res;

	res.vmid_mask = dqm->dev->compute_vmid_bitmap;

	res.queue_mask = 0;
	for (i = 0; i < KGD_MAX_QUEUES; ++i) {
		mec = (i / dqm->dev->kfd->shared_resources.num_queue_per_pipe)
			/ dqm->dev->kfd->shared_resources.num_pipe_per_mec;

		if (!test_bit(i, dqm->dev->kfd->shared_resources.cp_queue_bitmap))
			continue;

		/* only acquire queues from the first MEC */
		if (mec > 0)
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of res.queue_mask needs updating
		 */
		if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
			break;
		}

		res.queue_mask |= 1ull
			<< amdgpu_queue_mask_bit_to_set_resource_bit(
				dqm->dev->adev, i);
	}
	res.gws_mask = ~0ull;
	res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;

	pr_debug("Scheduling resources:\n"
		 "vmid mask: 0x%8X\n"
		 "queue mask: 0x%8llX\n",
		 res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packet_mgr, &res);
}

static int initialize_cpsch(struct device_queue_manager *dqm)
{
	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock_hidden);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->active_queue_count = dqm->processes_count = 0;
	dqm->active_cp_queue_count = 0;
	dqm->gws_queue_count = 0;
	dqm->active_runlist = false;
	INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
	dqm->trap_debug_vmid = 0;

	init_sdma_bitmaps(dqm);

	if (dqm->dev->kfd2kgd->get_iq_wait_times)
		dqm->dev->kfd2kgd->get_iq_wait_times(dqm->dev->adev,
						     &dqm->wait_times,
						     ffs(dqm->dev->xcc_mask) - 1);
	return 0;
}

static int start_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	retval = 0;

	dqm_lock(dqm);

	if (!dqm->dev->kfd->shared_resources.enable_mes) {
		retval = pm_init(&dqm->packet_mgr, dqm);
		if (retval)
			goto fail_packet_manager_init;

		retval = set_sched_resources(dqm);
		if (retval)
			goto fail_set_sched_resources;
	}
	pr_debug("Allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
				     &dqm->fence_mem);

	if (retval)
		goto fail_allocate_vidmem;

	dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	init_interrupts(dqm);

	/* clear hang status when the driver tries to start the hw scheduler */
	dqm->is_hws_hang = false;
	dqm->is_resetting = false;
	dqm->sched_running = true;

	if (!dqm->dev->kfd->shared_resources.enable_mes)
		execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
				     USE_DEFAULT_GRACE_PERIOD);

	/* Set CWSR grace period to 1x1000 cycle for GFX9.4.3 APU */
	if (amdgpu_emu_mode == 0 && dqm->dev->adev->gmc.is_app_apu &&
	    (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3))) {
		uint32_t reg_offset = 0;
		uint32_t grace_period = 1;

		retval = pm_update_grace_period(&dqm->packet_mgr,
						grace_period);
		if (retval)
			pr_err("Setting grace timeout failed\n");
		else if (dqm->dev->kfd2kgd->build_grace_period_packet_info)
			/* Update dqm->wait_times maintained in software */
			dqm->dev->kfd2kgd->build_grace_period_packet_info(
				dqm->dev->adev, dqm->wait_times,
				grace_period, &reg_offset,
				&dqm->wait_times);
	}

	dqm_unlock(dqm);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	if (!dqm->dev->kfd->shared_resources.enable_mes)
		pm_uninit(&dqm->packet_mgr, false);
fail_packet_manager_init:
	dqm_unlock(dqm);
	return retval;
}

static int stop_cpsch(struct device_queue_manager *dqm)
{
	bool hanging;

	dqm_lock(dqm);
	if (!dqm->sched_running) {
		dqm_unlock(dqm);
		return 0;
	}

	if (!dqm->is_hws_hang) {
		if (!dqm->dev->kfd->shared_resources.enable_mes)
			unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
					   USE_DEFAULT_GRACE_PERIOD, false);
		else
			remove_all_queues_mes(dqm);
	}

	hanging = dqm->is_hws_hang || dqm->is_resetting;
	dqm->sched_running = false;

	if (!dqm->dev->kfd->shared_resources.enable_mes)
		pm_release_ib(&dqm->packet_mgr);

	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	if (!dqm->dev->kfd->shared_resources.enable_mes)
		pm_uninit(&dqm->packet_mgr, hanging);
	dqm_unlock(dqm);

	return 0;
}

static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
				     struct kernel_queue *kq,
				     struct qcm_process_device *qpd)
{
	dqm_lock(dqm);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new kernel queue because %d queues were already created\n",
			dqm->total_queue_count);
		dqm_unlock(dqm);
		return -EPERM;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
		 dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	increment_queue_count(dqm, qpd, kq->queue);
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
			     USE_DEFAULT_GRACE_PERIOD);
	dqm_unlock(dqm);

	return 0;
}

static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
				       struct kernel_queue *kq,
				       struct qcm_process_device *qpd)
{
	dqm_lock(dqm);
	list_del(&kq->list);
	decrement_queue_count(dqm, qpd, kq->queue);
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
			     USE_DEFAULT_GRACE_PERIOD);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
		 dqm->total_queue_count);
	dqm_unlock(dqm);
}

static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			      struct qcm_process_device *qpd,
			      const struct kfd_criu_queue_priv_data *qd,
			      const void *restore_mqd, const void *restore_ctl_stack)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
			dqm->total_queue_count);
		retval = -EPERM;
		goto out;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
	    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		dqm_lock(dqm);
		retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
		dqm_unlock(dqm);
		if (retval)
			goto out;
	}

	retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);
	if (retval)
		goto out_deallocate_sdma_queue;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
	    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;
	q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
	if (!q->mqd_mem_obj) {
		retval = -ENOMEM;
		goto out_deallocate_doorbell;
	}

	dqm_lock(dqm);
	/*
	 * Eviction state logic: mark all queues as evicted, even ones
	 * not currently active. Restoring inactive queues later only
	 * updates the is_evicted flag but is a no-op otherwise.
	 */
	q->properties.is_evicted = !!qpd->evicted;
	q->properties.is_dbg_wa = qpd->pqm->process->debug_trap_enabled &&
				  kfd_dbg_has_cwsr_workaround(q->device);

	if (qd)
		mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
				     &q->properties, restore_mqd, restore_ctl_stack,
				     qd->ctl_stack_size);
	else
		mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
				  &q->gart_mqd_addr, &q->properties);

	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;

	if (q->properties.is_active) {
		increment_queue_count(dqm, qpd, q);

		if (!dqm->dev->kfd->shared_resources.enable_mes)
			retval = execute_queues_cpsch(dqm,
						      KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
						      USE_DEFAULT_GRACE_PERIOD);
		else
			retval = add_queue_mes(dqm, q, qpd);
		if (retval)
			goto cleanup_queue;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
		 dqm->total_queue_count);

	dqm_unlock(dqm);
	return retval;

cleanup_queue:
	qpd->queue_count--;
	list_del(&q->list);
	if (q->properties.is_active)
		decrement_queue_count(dqm, qpd, q);
	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
	dqm_unlock(dqm);
out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
	    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		dqm_lock(dqm);
		deallocate_sdma_queue(dqm, q);
		dqm_unlock(dqm);
	}
out:
	return retval;
}

int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
			      uint64_t fence_value,
			      unsigned int timeout_ms)
{
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, end_jiffies)) {
			pr_err("qcm fence wait loop timeout expired\n");
			/* In HWS case, this is used to halt the driver thread
			 * in order not to mess up CP states before doing
			 * scandumps for FW debugging.
1895 */
1896 while (halt_if_hws_hang)
1897 schedule();
1898
1899 return -ETIME;
1900 }
1901 schedule();
1902 }
1903
1904 return 0;
1905 }
1906
1907 /* dqm->lock mutex has to be locked before calling this function */
1908 static int map_queues_cpsch(struct device_queue_manager *dqm)
1909 {
1910 int retval;
1911
1912 if (!dqm->sched_running)
1913 return 0;
1914 if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
1915 return 0;
1916 if (dqm->active_runlist)
1917 return 0;
1918
1919 retval = pm_send_runlist(&dqm->packet_mgr, &dqm->queues);
1920 pr_debug("%s sent runlist\n", __func__);
1921 if (retval) {
1922 pr_err("failed to execute runlist\n");
1923 return retval;
1924 }
1925 dqm->active_runlist = true;
1926
1927 return retval;
1928 }
1929
1930 /* dqm->lock mutex has to be locked before calling this function */
1931 static int unmap_queues_cpsch(struct device_queue_manager *dqm,
1932 enum kfd_unmap_queues_filter filter,
1933 uint32_t filter_param,
1934 uint32_t grace_period,
1935 bool reset)
1936 {
1937 int retval = 0;
1938 struct mqd_manager *mqd_mgr;
1939
1940 if (!dqm->sched_running)
1941 return 0;
1942 if (dqm->is_hws_hang || dqm->is_resetting)
1943 return -EIO;
1944 if (!dqm->active_runlist)
1945 return retval;
1946
1947 if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
1948 retval = pm_update_grace_period(&dqm->packet_mgr, grace_period);
1949 if (retval)
1950 return retval;
1951 }
1952
1953 retval = pm_send_unmap_queue(&dqm->packet_mgr, filter, filter_param, reset);
1954 if (retval)
1955 return retval;
1956
1957 *dqm->fence_addr = KFD_FENCE_INIT;
1958 pm_send_query_status(&dqm->packet_mgr, dqm->fence_gpu_addr,
1959 KFD_FENCE_COMPLETED);
1960 /* Wait for preemption to complete, bounded by the preemption timeout */
1961 retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
1962 queue_preemption_timeout_ms);
1963 if (retval) {
1964 pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
1965 kfd_hws_hang(dqm);
1966 return retval;
1967 }
1968
1969 /* In the current MEC firmware implementation, if a compute queue
1970 * doesn't respond to the preemption request in time, the HIQ will
1971 * abandon the unmap request without returning any timeout error
1972 * to the driver. Instead, MEC firmware logs the doorbell of the
1973 * unresponsive compute queue in the HIQ.MQD.queue_doorbell_id fields.
1974 * To make sure the queue unmap was successful, the driver needs to
1975 * check those fields.
1976 */
1977 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
1978 if (mqd_mgr->read_doorbell_id(dqm->packet_mgr.priv_queue->queue->mqd)) {
1979 pr_err("HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
1980 while (halt_if_hws_hang)
1981 schedule();
1982 return -ETIME;
1983 }
1984
1985 /* We need to reset the grace period value for this device */
1986 if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
1987 if (pm_update_grace_period(&dqm->packet_mgr,
1988 USE_DEFAULT_GRACE_PERIOD))
1989 pr_err("Failed to reset grace period\n");
1990 }
1991
1992 pm_release_ib(&dqm->packet_mgr);
1993 dqm->active_runlist = false;
1994
1995 return retval;
1996 }
1997
1998 /* only for compute queues */
1999 static int reset_queues_cpsch(struct device_queue_manager *dqm,
2000 uint16_t pasid)
2001 {
2002 int retval;
2003
2004 dqm_lock(dqm);
2005
2006 retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_BY_PASID,
2007 pasid, USE_DEFAULT_GRACE_PERIOD, true);
2008
2009 dqm_unlock(dqm);
2010 return retval;
2011 }
2012
2013 /* dqm->lock mutex has to be locked before calling this function */
2014 static int execute_queues_cpsch(struct device_queue_manager *dqm,
2015 enum kfd_unmap_queues_filter filter,
2016 uint32_t filter_param,
2017 uint32_t grace_period)
2018 {
2019 int retval;
2020
2021 if (dqm->is_hws_hang)
2022 return -EIO;
2023 retval = unmap_queues_cpsch(dqm, filter, filter_param, grace_period, false);
2024 if (retval)
2025 return retval;
2026
2027 return map_queues_cpsch(dqm);
2028 }
2029
2030 static int wait_on_destroy_queue(struct device_queue_manager *dqm,
2031 struct queue *q)
2032 {
2033 struct kfd_process_device *pdd = kfd_get_process_device_data(q->device,
2034 q->process);
2035 int ret = 0;
2036
2037 if (pdd->qpd.is_debug)
2038 return ret;
2039
2040 q->properties.is_being_destroyed = true;
2041
2042 if (pdd->process->debug_trap_enabled && q->properties.is_suspended) {
2043 dqm_unlock(dqm);
2044 mutex_unlock(&q->process->mutex);
2045 ret = wait_event_interruptible(dqm->destroy_wait,
2046 !q->properties.is_suspended);
2047
2048 mutex_lock(&q->process->mutex);
2049 dqm_lock(dqm);
2050 }
2051
2052 return ret;
2053 }
2054
2055 static int destroy_queue_cpsch(struct device_queue_manager *dqm,
2056 struct qcm_process_device *qpd,
2057 struct queue *q)
2058 {
2059 int retval;
2060 struct mqd_manager *mqd_mgr;
2061 uint64_t sdma_val = 0;
2062 struct kfd_process_device *pdd = qpd_to_pdd(qpd);
2063
2064 /* Get the SDMA queue stats */
2065 if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
2066 (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
2067 retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
2068 &sdma_val);
2069 if (retval)
2070 pr_err("Failed to read SDMA queue counter for queue: %d\n",
2071 q->properties.queue_id);
2072 }
2073
2074 /* remove queue from list to prevent rescheduling after preemption */
2075 dqm_lock(dqm);
2076
2077 retval = wait_on_destroy_queue(dqm, q);
2078
2079 if (retval) {
2080 dqm_unlock(dqm);
2081 return retval;
2082 }
2083
2084 if (qpd->is_debug) {
2085 /*
2086 * Error: we currently do not allow destroying a queue
2087 * of a process that is being debugged.
2088 */
2089 retval = -EBUSY;
2090 goto failed_try_destroy_debugged_queue;
2091
2092 }
2093
2094 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
2095 q->properties.type)];
2096
2097 deallocate_doorbell(qpd, q);
2098
2099 if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
2100 (q->properties.type
== KFD_QUEUE_TYPE_SDMA_XGMI)) { 2101 deallocate_sdma_queue(dqm, q); 2102 pdd->sdma_past_activity_counter += sdma_val; 2103 } 2104 2105 list_del(&q->list); 2106 qpd->queue_count--; 2107 if (q->properties.is_active) { 2108 decrement_queue_count(dqm, qpd, q); 2109 if (!dqm->dev->kfd->shared_resources.enable_mes) { 2110 retval = execute_queues_cpsch(dqm, 2111 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, 2112 USE_DEFAULT_GRACE_PERIOD); 2113 if (retval == -ETIME) 2114 qpd->reset_wavefronts = true; 2115 } else { 2116 retval = remove_queue_mes(dqm, q, qpd); 2117 } 2118 } 2119 2120 /* 2121 * Unconditionally decrement this counter, regardless of the queue's 2122 * type 2123 */ 2124 dqm->total_queue_count--; 2125 pr_debug("Total of %d queues are accountable so far\n", 2126 dqm->total_queue_count); 2127 2128 dqm_unlock(dqm); 2129 2130 /* 2131 * Do free_mqd and raise delete event after dqm_unlock(dqm) to avoid 2132 * circular locking 2133 */ 2134 kfd_dbg_ev_raise(KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE), 2135 qpd->pqm->process, q->device, 2136 -1, false, NULL, 0); 2137 2138 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj); 2139 2140 return retval; 2141 2142 failed_try_destroy_debugged_queue: 2143 2144 dqm_unlock(dqm); 2145 return retval; 2146 } 2147 2148 /* 2149 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to 2150 * stay in user mode. 2151 */ 2152 #define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL 2153 /* APE1 limit is inclusive and 64K aligned. */ 2154 #define APE1_LIMIT_ALIGNMENT 0xFFFF 2155 2156 static bool set_cache_memory_policy(struct device_queue_manager *dqm, 2157 struct qcm_process_device *qpd, 2158 enum cache_policy default_policy, 2159 enum cache_policy alternate_policy, 2160 void __user *alternate_aperture_base, 2161 uint64_t alternate_aperture_size) 2162 { 2163 bool retval = true; 2164 2165 if (!dqm->asic_ops.set_cache_memory_policy) 2166 return retval; 2167 2168 dqm_lock(dqm); 2169 2170 if (alternate_aperture_size == 0) { 2171 /* base > limit disables APE1 */ 2172 qpd->sh_mem_ape1_base = 1; 2173 qpd->sh_mem_ape1_limit = 0; 2174 } else { 2175 /* 2176 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]}, 2177 * SH_MEM_APE1_BASE[31:0], 0x0000 } 2178 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]}, 2179 * SH_MEM_APE1_LIMIT[31:0], 0xFFFF } 2180 * Verify that the base and size parameters can be 2181 * represented in this format and convert them. 2182 * Additionally restrict APE1 to user-mode addresses. 
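 *
 * Worked example (illustrative values, not taken from the original
 * comment): a 64KB aperture at base 0x1_0000_0000 gives
 * limit = base + size - 1 = 0x1_0000_FFFF. Both values pass the
 * APE1_FIXED_BITS_MASK check, and the code below programs
 * sh_mem_ape1_base = 0x10000 and sh_mem_ape1_limit = 0x10000.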
2183 */ 2184 2185 uint64_t base = (uintptr_t)alternate_aperture_base; 2186 uint64_t limit = base + alternate_aperture_size - 1; 2187 2188 if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 || 2189 (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) { 2190 retval = false; 2191 goto out; 2192 } 2193 2194 qpd->sh_mem_ape1_base = base >> 16; 2195 qpd->sh_mem_ape1_limit = limit >> 16; 2196 } 2197 2198 retval = dqm->asic_ops.set_cache_memory_policy( 2199 dqm, 2200 qpd, 2201 default_policy, 2202 alternate_policy, 2203 alternate_aperture_base, 2204 alternate_aperture_size); 2205 2206 if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0)) 2207 program_sh_mem_settings(dqm, qpd); 2208 2209 pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n", 2210 qpd->sh_mem_config, qpd->sh_mem_ape1_base, 2211 qpd->sh_mem_ape1_limit); 2212 2213 out: 2214 dqm_unlock(dqm); 2215 return retval; 2216 } 2217 2218 static int process_termination_nocpsch(struct device_queue_manager *dqm, 2219 struct qcm_process_device *qpd) 2220 { 2221 struct queue *q; 2222 struct device_process_node *cur, *next_dpn; 2223 int retval = 0; 2224 bool found = false; 2225 2226 dqm_lock(dqm); 2227 2228 /* Clear all user mode queues */ 2229 while (!list_empty(&qpd->queues_list)) { 2230 struct mqd_manager *mqd_mgr; 2231 int ret; 2232 2233 q = list_first_entry(&qpd->queues_list, struct queue, list); 2234 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( 2235 q->properties.type)]; 2236 ret = destroy_queue_nocpsch_locked(dqm, qpd, q); 2237 if (ret) 2238 retval = ret; 2239 dqm_unlock(dqm); 2240 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj); 2241 dqm_lock(dqm); 2242 } 2243 2244 /* Unregister process */ 2245 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) { 2246 if (qpd == cur->qpd) { 2247 list_del(&cur->list); 2248 kfree(cur); 2249 dqm->processes_count--; 2250 found = true; 2251 break; 2252 } 2253 } 2254 2255 dqm_unlock(dqm); 2256 2257 /* Outside the DQM lock because under the DQM lock we can't do 2258 * reclaim or take other locks that others hold while reclaiming. 2259 */ 2260 if (found) 2261 kfd_dec_compute_active(dqm->dev); 2262 2263 return retval; 2264 } 2265 2266 static int get_wave_state(struct device_queue_manager *dqm, 2267 struct queue *q, 2268 void __user *ctl_stack, 2269 u32 *ctl_stack_used_size, 2270 u32 *save_area_used_size) 2271 { 2272 struct mqd_manager *mqd_mgr; 2273 2274 dqm_lock(dqm); 2275 2276 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP]; 2277 2278 if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE || 2279 q->properties.is_active || !q->device->kfd->cwsr_enabled || 2280 !mqd_mgr->get_wave_state) { 2281 dqm_unlock(dqm); 2282 return -EINVAL; 2283 } 2284 2285 dqm_unlock(dqm); 2286 2287 /* 2288 * get_wave_state is outside the dqm lock to prevent circular locking 2289 * and the queue should be protected against destruction by the process 2290 * lock. 
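 * destroy_queue_cpsch() moves its free_mqd() call outside the DQM lock
 * for the same reason.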
2291 */ 2292 return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, &q->properties, 2293 ctl_stack, ctl_stack_used_size, save_area_used_size); 2294 } 2295 2296 static void get_queue_checkpoint_info(struct device_queue_manager *dqm, 2297 const struct queue *q, 2298 u32 *mqd_size, 2299 u32 *ctl_stack_size) 2300 { 2301 struct mqd_manager *mqd_mgr; 2302 enum KFD_MQD_TYPE mqd_type = 2303 get_mqd_type_from_queue_type(q->properties.type); 2304 2305 dqm_lock(dqm); 2306 mqd_mgr = dqm->mqd_mgrs[mqd_type]; 2307 *mqd_size = mqd_mgr->mqd_size; 2308 *ctl_stack_size = 0; 2309 2310 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info) 2311 mqd_mgr->get_checkpoint_info(mqd_mgr, q->mqd, ctl_stack_size); 2312 2313 dqm_unlock(dqm); 2314 } 2315 2316 static int checkpoint_mqd(struct device_queue_manager *dqm, 2317 const struct queue *q, 2318 void *mqd, 2319 void *ctl_stack) 2320 { 2321 struct mqd_manager *mqd_mgr; 2322 int r = 0; 2323 enum KFD_MQD_TYPE mqd_type = 2324 get_mqd_type_from_queue_type(q->properties.type); 2325 2326 dqm_lock(dqm); 2327 2328 if (q->properties.is_active || !q->device->kfd->cwsr_enabled) { 2329 r = -EINVAL; 2330 goto dqm_unlock; 2331 } 2332 2333 mqd_mgr = dqm->mqd_mgrs[mqd_type]; 2334 if (!mqd_mgr->checkpoint_mqd) { 2335 r = -EOPNOTSUPP; 2336 goto dqm_unlock; 2337 } 2338 2339 mqd_mgr->checkpoint_mqd(mqd_mgr, q->mqd, mqd, ctl_stack); 2340 2341 dqm_unlock: 2342 dqm_unlock(dqm); 2343 return r; 2344 } 2345 2346 static int process_termination_cpsch(struct device_queue_manager *dqm, 2347 struct qcm_process_device *qpd) 2348 { 2349 int retval; 2350 struct queue *q; 2351 struct kernel_queue *kq, *kq_next; 2352 struct mqd_manager *mqd_mgr; 2353 struct device_process_node *cur, *next_dpn; 2354 enum kfd_unmap_queues_filter filter = 2355 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES; 2356 bool found = false; 2357 2358 retval = 0; 2359 2360 dqm_lock(dqm); 2361 2362 /* Clean all kernel queues */ 2363 list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) { 2364 list_del(&kq->list); 2365 decrement_queue_count(dqm, qpd, kq->queue); 2366 qpd->is_debug = false; 2367 dqm->total_queue_count--; 2368 filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES; 2369 } 2370 2371 /* Clear all user mode queues */ 2372 list_for_each_entry(q, &qpd->queues_list, list) { 2373 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) 2374 deallocate_sdma_queue(dqm, q); 2375 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) 2376 deallocate_sdma_queue(dqm, q); 2377 2378 if (q->properties.is_active) { 2379 decrement_queue_count(dqm, qpd, q); 2380 2381 if (dqm->dev->kfd->shared_resources.enable_mes) { 2382 retval = remove_queue_mes(dqm, q, qpd); 2383 if (retval) 2384 pr_err("Failed to remove queue %d\n", 2385 q->properties.queue_id); 2386 } 2387 } 2388 2389 dqm->total_queue_count--; 2390 } 2391 2392 /* Unregister process */ 2393 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) { 2394 if (qpd == cur->qpd) { 2395 list_del(&cur->list); 2396 kfree(cur); 2397 dqm->processes_count--; 2398 found = true; 2399 break; 2400 } 2401 } 2402 2403 if (!dqm->dev->kfd->shared_resources.enable_mes) 2404 retval = execute_queues_cpsch(dqm, filter, 0, USE_DEFAULT_GRACE_PERIOD); 2405 2406 if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) { 2407 pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev); 2408 dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process); 2409 qpd->reset_wavefronts = false; 2410 } 2411 2412 /* Lastly, free mqd resources. 
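 * The queue list is drained one entry at a time, dropping the DQM lock
 * around each free.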
2413 * Do free_mqd() after dqm_unlock to avoid circular locking. 2414 */ 2415 while (!list_empty(&qpd->queues_list)) { 2416 q = list_first_entry(&qpd->queues_list, struct queue, list); 2417 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( 2418 q->properties.type)]; 2419 list_del(&q->list); 2420 qpd->queue_count--; 2421 dqm_unlock(dqm); 2422 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj); 2423 dqm_lock(dqm); 2424 } 2425 dqm_unlock(dqm); 2426 2427 /* Outside the DQM lock because under the DQM lock we can't do 2428 * reclaim or take other locks that others hold while reclaiming. 2429 */ 2430 if (found) 2431 kfd_dec_compute_active(dqm->dev); 2432 2433 return retval; 2434 } 2435 2436 static int init_mqd_managers(struct device_queue_manager *dqm) 2437 { 2438 int i, j; 2439 struct mqd_manager *mqd_mgr; 2440 2441 for (i = 0; i < KFD_MQD_TYPE_MAX; i++) { 2442 mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev); 2443 if (!mqd_mgr) { 2444 pr_err("mqd manager [%d] initialization failed\n", i); 2445 goto out_free; 2446 } 2447 dqm->mqd_mgrs[i] = mqd_mgr; 2448 } 2449 2450 return 0; 2451 2452 out_free: 2453 for (j = 0; j < i; j++) { 2454 kfree(dqm->mqd_mgrs[j]); 2455 dqm->mqd_mgrs[j] = NULL; 2456 } 2457 2458 return -ENOMEM; 2459 } 2460 2461 /* Allocate one hiq mqd (HWS) and all SDMA mqd in a continuous trunk*/ 2462 static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) 2463 { 2464 int retval; 2465 struct kfd_node *dev = dqm->dev; 2466 struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd; 2467 uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size * 2468 get_num_all_sdma_engines(dqm) * 2469 dev->kfd->device_info.num_sdma_queues_per_engine + 2470 (dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size * 2471 NUM_XCC(dqm->dev->xcc_mask)); 2472 2473 retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size, 2474 &(mem_obj->gtt_mem), &(mem_obj->gpu_addr), 2475 (void *)&(mem_obj->cpu_ptr), false); 2476 2477 return retval; 2478 } 2479 2480 struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev) 2481 { 2482 struct device_queue_manager *dqm; 2483 2484 pr_debug("Loading device queue manager\n"); 2485 2486 dqm = kzalloc(sizeof(*dqm), GFP_KERNEL); 2487 if (!dqm) 2488 return NULL; 2489 2490 switch (dev->adev->asic_type) { 2491 /* HWS is not available on Hawaii. */ 2492 case CHIP_HAWAII: 2493 /* HWS depends on CWSR for timely dequeue. CWSR is not 2494 * available on Tonga. 2495 * 2496 * FIXME: This argument also applies to Kaveri. 
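 * (CHIP_HAWAII above has no statements of its own and falls through
 * to this case.)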
2497 */ 2498 case CHIP_TONGA: 2499 dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS; 2500 break; 2501 default: 2502 dqm->sched_policy = sched_policy; 2503 break; 2504 } 2505 2506 dqm->dev = dev; 2507 switch (dqm->sched_policy) { 2508 case KFD_SCHED_POLICY_HWS: 2509 case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: 2510 /* initialize dqm for cp scheduling */ 2511 dqm->ops.create_queue = create_queue_cpsch; 2512 dqm->ops.initialize = initialize_cpsch; 2513 dqm->ops.start = start_cpsch; 2514 dqm->ops.stop = stop_cpsch; 2515 dqm->ops.pre_reset = pre_reset; 2516 dqm->ops.destroy_queue = destroy_queue_cpsch; 2517 dqm->ops.update_queue = update_queue; 2518 dqm->ops.register_process = register_process; 2519 dqm->ops.unregister_process = unregister_process; 2520 dqm->ops.uninitialize = uninitialize; 2521 dqm->ops.create_kernel_queue = create_kernel_queue_cpsch; 2522 dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch; 2523 dqm->ops.set_cache_memory_policy = set_cache_memory_policy; 2524 dqm->ops.process_termination = process_termination_cpsch; 2525 dqm->ops.evict_process_queues = evict_process_queues_cpsch; 2526 dqm->ops.restore_process_queues = restore_process_queues_cpsch; 2527 dqm->ops.get_wave_state = get_wave_state; 2528 dqm->ops.reset_queues = reset_queues_cpsch; 2529 dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info; 2530 dqm->ops.checkpoint_mqd = checkpoint_mqd; 2531 break; 2532 case KFD_SCHED_POLICY_NO_HWS: 2533 /* initialize dqm for no cp scheduling */ 2534 dqm->ops.start = start_nocpsch; 2535 dqm->ops.stop = stop_nocpsch; 2536 dqm->ops.pre_reset = pre_reset; 2537 dqm->ops.create_queue = create_queue_nocpsch; 2538 dqm->ops.destroy_queue = destroy_queue_nocpsch; 2539 dqm->ops.update_queue = update_queue; 2540 dqm->ops.register_process = register_process; 2541 dqm->ops.unregister_process = unregister_process; 2542 dqm->ops.initialize = initialize_nocpsch; 2543 dqm->ops.uninitialize = uninitialize; 2544 dqm->ops.set_cache_memory_policy = set_cache_memory_policy; 2545 dqm->ops.process_termination = process_termination_nocpsch; 2546 dqm->ops.evict_process_queues = evict_process_queues_nocpsch; 2547 dqm->ops.restore_process_queues = 2548 restore_process_queues_nocpsch; 2549 dqm->ops.get_wave_state = get_wave_state; 2550 dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info; 2551 dqm->ops.checkpoint_mqd = checkpoint_mqd; 2552 break; 2553 default: 2554 pr_err("Invalid scheduling policy %d\n", dqm->sched_policy); 2555 goto out_free; 2556 } 2557 2558 switch (dev->adev->asic_type) { 2559 case CHIP_KAVERI: 2560 case CHIP_HAWAII: 2561 device_queue_manager_init_cik(&dqm->asic_ops); 2562 break; 2563 2564 case CHIP_CARRIZO: 2565 case CHIP_TONGA: 2566 case CHIP_FIJI: 2567 case CHIP_POLARIS10: 2568 case CHIP_POLARIS11: 2569 case CHIP_POLARIS12: 2570 case CHIP_VEGAM: 2571 device_queue_manager_init_vi(&dqm->asic_ops); 2572 break; 2573 2574 default: 2575 if (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0)) 2576 device_queue_manager_init_v11(&dqm->asic_ops); 2577 else if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1)) 2578 device_queue_manager_init_v10(&dqm->asic_ops); 2579 else if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1)) 2580 device_queue_manager_init_v9(&dqm->asic_ops); 2581 else { 2582 WARN(1, "Unexpected ASIC family %u", 2583 dev->adev->asic_type); 2584 goto out_free; 2585 } 2586 } 2587 2588 if (init_mqd_managers(dqm)) 2589 goto out_free; 2590 2591 if (!dev->kfd->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) { 2592 pr_err("Failed to allocate hiq sdma mqd trunk buffer\n"); 
2593 goto out_free; 2594 } 2595 2596 if (!dqm->ops.initialize(dqm)) { 2597 init_waitqueue_head(&dqm->destroy_wait); 2598 return dqm; 2599 } 2600 2601 out_free: 2602 kfree(dqm); 2603 return NULL; 2604 } 2605 2606 static void deallocate_hiq_sdma_mqd(struct kfd_node *dev, 2607 struct kfd_mem_obj *mqd) 2608 { 2609 WARN(!mqd, "No hiq sdma mqd trunk to free"); 2610 2611 amdgpu_amdkfd_free_gtt_mem(dev->adev, mqd->gtt_mem); 2612 } 2613 2614 void device_queue_manager_uninit(struct device_queue_manager *dqm) 2615 { 2616 dqm->ops.stop(dqm); 2617 dqm->ops.uninitialize(dqm); 2618 if (!dqm->dev->kfd->shared_resources.enable_mes) 2619 deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd); 2620 kfree(dqm); 2621 } 2622 2623 int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid) 2624 { 2625 struct kfd_process_device *pdd; 2626 struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); 2627 int ret = 0; 2628 2629 if (!p) 2630 return -EINVAL; 2631 WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid); 2632 pdd = kfd_get_process_device_data(dqm->dev, p); 2633 if (pdd) 2634 ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd); 2635 kfd_unref_process(p); 2636 2637 return ret; 2638 } 2639 2640 static void kfd_process_hw_exception(struct work_struct *work) 2641 { 2642 struct device_queue_manager *dqm = container_of(work, 2643 struct device_queue_manager, hw_exception_work); 2644 amdgpu_amdkfd_gpu_reset(dqm->dev->adev); 2645 } 2646 2647 int reserve_debug_trap_vmid(struct device_queue_manager *dqm, 2648 struct qcm_process_device *qpd) 2649 { 2650 int r; 2651 int updated_vmid_mask; 2652 2653 if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { 2654 pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy); 2655 return -EINVAL; 2656 } 2657 2658 dqm_lock(dqm); 2659 2660 if (dqm->trap_debug_vmid != 0) { 2661 pr_err("Trap debug id already reserved\n"); 2662 r = -EBUSY; 2663 goto out_unlock; 2664 } 2665 2666 r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, 2667 USE_DEFAULT_GRACE_PERIOD, false); 2668 if (r) 2669 goto out_unlock; 2670 2671 updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap; 2672 updated_vmid_mask &= ~(1 << dqm->dev->vm_info.last_vmid_kfd); 2673 2674 dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask; 2675 dqm->trap_debug_vmid = dqm->dev->vm_info.last_vmid_kfd; 2676 r = set_sched_resources(dqm); 2677 if (r) 2678 goto out_unlock; 2679 2680 r = map_queues_cpsch(dqm); 2681 if (r) 2682 goto out_unlock; 2683 2684 pr_debug("Reserved VMID for trap debug: %i\n", dqm->trap_debug_vmid); 2685 2686 out_unlock: 2687 dqm_unlock(dqm); 2688 return r; 2689 } 2690 2691 /* 2692 * Releases vmid for the trap debugger 2693 */ 2694 int release_debug_trap_vmid(struct device_queue_manager *dqm, 2695 struct qcm_process_device *qpd) 2696 { 2697 int r; 2698 int updated_vmid_mask; 2699 uint32_t trap_debug_vmid; 2700 2701 if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { 2702 pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy); 2703 return -EINVAL; 2704 } 2705 2706 dqm_lock(dqm); 2707 trap_debug_vmid = dqm->trap_debug_vmid; 2708 if (dqm->trap_debug_vmid == 0) { 2709 pr_err("Trap debug id is not reserved\n"); 2710 r = -EINVAL; 2711 goto out_unlock; 2712 } 2713 2714 r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, 2715 USE_DEFAULT_GRACE_PERIOD, false); 2716 if (r) 2717 goto out_unlock; 2718 2719 updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap; 2720 updated_vmid_mask |= (1 << 
dqm->dev->vm_info.last_vmid_kfd); 2721 2722 dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask; 2723 dqm->trap_debug_vmid = 0; 2724 r = set_sched_resources(dqm); 2725 if (r) 2726 goto out_unlock; 2727 2728 r = map_queues_cpsch(dqm); 2729 if (r) 2730 goto out_unlock; 2731 2732 pr_debug("Released VMID for trap debug: %i\n", trap_debug_vmid); 2733 2734 out_unlock: 2735 dqm_unlock(dqm); 2736 return r; 2737 } 2738 2739 #define QUEUE_NOT_FOUND -1 2740 /* invalidate queue operation in array */ 2741 static void q_array_invalidate(uint32_t num_queues, uint32_t *queue_ids) 2742 { 2743 int i; 2744 2745 for (i = 0; i < num_queues; i++) 2746 queue_ids[i] |= KFD_DBG_QUEUE_INVALID_MASK; 2747 } 2748 2749 /* find queue index in array */ 2750 static int q_array_get_index(unsigned int queue_id, 2751 uint32_t num_queues, 2752 uint32_t *queue_ids) 2753 { 2754 int i; 2755 2756 for (i = 0; i < num_queues; i++) 2757 if (queue_id == (queue_ids[i] & ~KFD_DBG_QUEUE_INVALID_MASK)) 2758 return i; 2759 2760 return QUEUE_NOT_FOUND; 2761 } 2762 2763 struct copy_context_work_handler_workarea { 2764 struct work_struct copy_context_work; 2765 struct kfd_process *p; 2766 }; 2767 2768 static void copy_context_work_handler (struct work_struct *work) 2769 { 2770 struct copy_context_work_handler_workarea *workarea; 2771 struct mqd_manager *mqd_mgr; 2772 struct queue *q; 2773 struct mm_struct *mm; 2774 struct kfd_process *p; 2775 uint32_t tmp_ctl_stack_used_size, tmp_save_area_used_size; 2776 int i; 2777 2778 workarea = container_of(work, 2779 struct copy_context_work_handler_workarea, 2780 copy_context_work); 2781 2782 p = workarea->p; 2783 mm = get_task_mm(p->lead_thread); 2784 2785 if (!mm) 2786 return; 2787 2788 kthread_use_mm(mm); 2789 for (i = 0; i < p->n_pdds; i++) { 2790 struct kfd_process_device *pdd = p->pdds[i]; 2791 struct device_queue_manager *dqm = pdd->dev->dqm; 2792 struct qcm_process_device *qpd = &pdd->qpd; 2793 2794 list_for_each_entry(q, &qpd->queues_list, list) { 2795 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP]; 2796 2797 /* We ignore the return value from get_wave_state 2798 * because 2799 * i) right now, it always returns 0, and 2800 * ii) if we hit an error, we would continue to the 2801 * next queue anyway. 2802 */ 2803 mqd_mgr->get_wave_state(mqd_mgr, 2804 q->mqd, 2805 &q->properties, 2806 (void __user *) q->properties.ctx_save_restore_area_address, 2807 &tmp_ctl_stack_used_size, 2808 &tmp_save_area_used_size); 2809 } 2810 } 2811 kthread_unuse_mm(mm); 2812 mmput(mm); 2813 } 2814 2815 static uint32_t *get_queue_ids(uint32_t num_queues, uint32_t *usr_queue_id_array) 2816 { 2817 size_t array_size = num_queues * sizeof(uint32_t); 2818 2819 if (!usr_queue_id_array) 2820 return NULL; 2821 2822 return memdup_user(usr_queue_id_array, array_size); 2823 } 2824 2825 int resume_queues(struct kfd_process *p, 2826 uint32_t num_queues, 2827 uint32_t *usr_queue_id_array) 2828 { 2829 uint32_t *queue_ids = NULL; 2830 int total_resumed = 0; 2831 int i; 2832 2833 if (usr_queue_id_array) { 2834 queue_ids = get_queue_ids(num_queues, usr_queue_id_array); 2835 2836 if (IS_ERR(queue_ids)) 2837 return PTR_ERR(queue_ids); 2838 2839 /* mask all queues as invalid. 
unmask per successful request */ 2840 q_array_invalidate(num_queues, queue_ids); 2841 } 2842 2843 for (i = 0; i < p->n_pdds; i++) { 2844 struct kfd_process_device *pdd = p->pdds[i]; 2845 struct device_queue_manager *dqm = pdd->dev->dqm; 2846 struct qcm_process_device *qpd = &pdd->qpd; 2847 struct queue *q; 2848 int r, per_device_resumed = 0; 2849 2850 dqm_lock(dqm); 2851 2852 /* unmask queues that resume or already resumed as valid */ 2853 list_for_each_entry(q, &qpd->queues_list, list) { 2854 int q_idx = QUEUE_NOT_FOUND; 2855 2856 if (queue_ids) 2857 q_idx = q_array_get_index( 2858 q->properties.queue_id, 2859 num_queues, 2860 queue_ids); 2861 2862 if (!queue_ids || q_idx != QUEUE_NOT_FOUND) { 2863 int err = resume_single_queue(dqm, &pdd->qpd, q); 2864 2865 if (queue_ids) { 2866 if (!err) { 2867 queue_ids[q_idx] &= 2868 ~KFD_DBG_QUEUE_INVALID_MASK; 2869 } else { 2870 queue_ids[q_idx] |= 2871 KFD_DBG_QUEUE_ERROR_MASK; 2872 break; 2873 } 2874 } 2875 2876 if (dqm->dev->kfd->shared_resources.enable_mes) { 2877 wake_up_all(&dqm->destroy_wait); 2878 if (!err) 2879 total_resumed++; 2880 } else { 2881 per_device_resumed++; 2882 } 2883 } 2884 } 2885 2886 if (!per_device_resumed) { 2887 dqm_unlock(dqm); 2888 continue; 2889 } 2890 2891 r = execute_queues_cpsch(dqm, 2892 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 2893 0, 2894 USE_DEFAULT_GRACE_PERIOD); 2895 if (r) { 2896 pr_err("Failed to resume process queues\n"); 2897 if (queue_ids) { 2898 list_for_each_entry(q, &qpd->queues_list, list) { 2899 int q_idx = q_array_get_index( 2900 q->properties.queue_id, 2901 num_queues, 2902 queue_ids); 2903 2904 /* mask queue as error on resume fail */ 2905 if (q_idx != QUEUE_NOT_FOUND) 2906 queue_ids[q_idx] |= 2907 KFD_DBG_QUEUE_ERROR_MASK; 2908 } 2909 } 2910 } else { 2911 wake_up_all(&dqm->destroy_wait); 2912 total_resumed += per_device_resumed; 2913 } 2914 2915 dqm_unlock(dqm); 2916 } 2917 2918 if (queue_ids) { 2919 if (copy_to_user((void __user *)usr_queue_id_array, queue_ids, 2920 num_queues * sizeof(uint32_t))) 2921 pr_err("copy_to_user failed on queue resume\n"); 2922 2923 kfree(queue_ids); 2924 } 2925 2926 return total_resumed; 2927 } 2928 2929 int suspend_queues(struct kfd_process *p, 2930 uint32_t num_queues, 2931 uint32_t grace_period, 2932 uint64_t exception_clear_mask, 2933 uint32_t *usr_queue_id_array) 2934 { 2935 uint32_t *queue_ids = get_queue_ids(num_queues, usr_queue_id_array); 2936 int total_suspended = 0; 2937 int i; 2938 2939 if (IS_ERR(queue_ids)) 2940 return PTR_ERR(queue_ids); 2941 2942 /* mask all queues as invalid. 
umask on successful request */ 2943 q_array_invalidate(num_queues, queue_ids); 2944 2945 for (i = 0; i < p->n_pdds; i++) { 2946 struct kfd_process_device *pdd = p->pdds[i]; 2947 struct device_queue_manager *dqm = pdd->dev->dqm; 2948 struct qcm_process_device *qpd = &pdd->qpd; 2949 struct queue *q; 2950 int r, per_device_suspended = 0; 2951 2952 mutex_lock(&p->event_mutex); 2953 dqm_lock(dqm); 2954 2955 /* unmask queues that suspend or already suspended */ 2956 list_for_each_entry(q, &qpd->queues_list, list) { 2957 int q_idx = q_array_get_index(q->properties.queue_id, 2958 num_queues, 2959 queue_ids); 2960 2961 if (q_idx != QUEUE_NOT_FOUND) { 2962 int err = suspend_single_queue(dqm, pdd, q); 2963 bool is_mes = dqm->dev->kfd->shared_resources.enable_mes; 2964 2965 if (!err) { 2966 queue_ids[q_idx] &= ~KFD_DBG_QUEUE_INVALID_MASK; 2967 if (exception_clear_mask && is_mes) 2968 q->properties.exception_status &= 2969 ~exception_clear_mask; 2970 2971 if (is_mes) 2972 total_suspended++; 2973 else 2974 per_device_suspended++; 2975 } else if (err != -EBUSY) { 2976 r = err; 2977 queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK; 2978 break; 2979 } 2980 } 2981 } 2982 2983 if (!per_device_suspended) { 2984 dqm_unlock(dqm); 2985 mutex_unlock(&p->event_mutex); 2986 if (total_suspended) 2987 amdgpu_amdkfd_debug_mem_fence(dqm->dev->adev); 2988 continue; 2989 } 2990 2991 r = execute_queues_cpsch(dqm, 2992 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, 2993 grace_period); 2994 2995 if (r) 2996 pr_err("Failed to suspend process queues.\n"); 2997 else 2998 total_suspended += per_device_suspended; 2999 3000 list_for_each_entry(q, &qpd->queues_list, list) { 3001 int q_idx = q_array_get_index(q->properties.queue_id, 3002 num_queues, queue_ids); 3003 3004 if (q_idx == QUEUE_NOT_FOUND) 3005 continue; 3006 3007 /* mask queue as error on suspend fail */ 3008 if (r) 3009 queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK; 3010 else if (exception_clear_mask) 3011 q->properties.exception_status &= 3012 ~exception_clear_mask; 3013 } 3014 3015 dqm_unlock(dqm); 3016 mutex_unlock(&p->event_mutex); 3017 amdgpu_device_flush_hdp(dqm->dev->adev, NULL); 3018 } 3019 3020 if (total_suspended) { 3021 struct copy_context_work_handler_workarea copy_context_worker; 3022 3023 INIT_WORK_ONSTACK( 3024 ©_context_worker.copy_context_work, 3025 copy_context_work_handler); 3026 3027 copy_context_worker.p = p; 3028 3029 schedule_work(©_context_worker.copy_context_work); 3030 3031 3032 flush_work(©_context_worker.copy_context_work); 3033 destroy_work_on_stack(©_context_worker.copy_context_work); 3034 } 3035 3036 if (copy_to_user((void __user *)usr_queue_id_array, queue_ids, 3037 num_queues * sizeof(uint32_t))) 3038 pr_err("copy_to_user failed on queue suspend\n"); 3039 3040 kfree(queue_ids); 3041 3042 return total_suspended; 3043 } 3044 3045 static uint32_t set_queue_type_for_user(struct queue_properties *q_props) 3046 { 3047 switch (q_props->type) { 3048 case KFD_QUEUE_TYPE_COMPUTE: 3049 return q_props->format == KFD_QUEUE_FORMAT_PM4 3050 ? 
KFD_IOC_QUEUE_TYPE_COMPUTE 3051 : KFD_IOC_QUEUE_TYPE_COMPUTE_AQL; 3052 case KFD_QUEUE_TYPE_SDMA: 3053 return KFD_IOC_QUEUE_TYPE_SDMA; 3054 case KFD_QUEUE_TYPE_SDMA_XGMI: 3055 return KFD_IOC_QUEUE_TYPE_SDMA_XGMI; 3056 default: 3057 WARN_ONCE(true, "queue type not recognized!"); 3058 return 0xffffffff; 3059 }; 3060 } 3061 3062 void set_queue_snapshot_entry(struct queue *q, 3063 uint64_t exception_clear_mask, 3064 struct kfd_queue_snapshot_entry *qss_entry) 3065 { 3066 qss_entry->ring_base_address = q->properties.queue_address; 3067 qss_entry->write_pointer_address = (uint64_t)q->properties.write_ptr; 3068 qss_entry->read_pointer_address = (uint64_t)q->properties.read_ptr; 3069 qss_entry->ctx_save_restore_address = 3070 q->properties.ctx_save_restore_area_address; 3071 qss_entry->ctx_save_restore_area_size = 3072 q->properties.ctx_save_restore_area_size; 3073 qss_entry->exception_status = q->properties.exception_status; 3074 qss_entry->queue_id = q->properties.queue_id; 3075 qss_entry->gpu_id = q->device->id; 3076 qss_entry->ring_size = (uint32_t)q->properties.queue_size; 3077 qss_entry->queue_type = set_queue_type_for_user(&q->properties); 3078 q->properties.exception_status &= ~exception_clear_mask; 3079 } 3080 3081 int debug_lock_and_unmap(struct device_queue_manager *dqm) 3082 { 3083 int r; 3084 3085 if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { 3086 pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy); 3087 return -EINVAL; 3088 } 3089 3090 if (!kfd_dbg_is_per_vmid_supported(dqm->dev)) 3091 return 0; 3092 3093 dqm_lock(dqm); 3094 3095 r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, 0, false); 3096 if (r) 3097 dqm_unlock(dqm); 3098 3099 return r; 3100 } 3101 3102 int debug_map_and_unlock(struct device_queue_manager *dqm) 3103 { 3104 int r; 3105 3106 if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { 3107 pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy); 3108 return -EINVAL; 3109 } 3110 3111 if (!kfd_dbg_is_per_vmid_supported(dqm->dev)) 3112 return 0; 3113 3114 r = map_queues_cpsch(dqm); 3115 3116 dqm_unlock(dqm); 3117 3118 return r; 3119 } 3120 3121 int debug_refresh_runlist(struct device_queue_manager *dqm) 3122 { 3123 int r = debug_lock_and_unmap(dqm); 3124 3125 if (r) 3126 return r; 3127 3128 return debug_map_and_unlock(dqm); 3129 } 3130 3131 #if defined(CONFIG_DEBUG_FS) 3132 3133 static void seq_reg_dump(struct seq_file *m, 3134 uint32_t (*dump)[2], uint32_t n_regs) 3135 { 3136 uint32_t i, count; 3137 3138 for (i = 0, count = 0; i < n_regs; i++) { 3139 if (count == 0 || 3140 dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) { 3141 seq_printf(m, "%s %08x: %08x", 3142 i ? 
"\n" : "", 3143 dump[i][0], dump[i][1]); 3144 count = 7; 3145 } else { 3146 seq_printf(m, " %08x", dump[i][1]); 3147 count--; 3148 } 3149 } 3150 3151 seq_puts(m, "\n"); 3152 } 3153 3154 int dqm_debugfs_hqds(struct seq_file *m, void *data) 3155 { 3156 struct device_queue_manager *dqm = data; 3157 uint32_t xcc_mask = dqm->dev->xcc_mask; 3158 uint32_t (*dump)[2], n_regs; 3159 int pipe, queue; 3160 int r = 0, xcc_id; 3161 uint32_t sdma_engine_start; 3162 3163 if (!dqm->sched_running) { 3164 seq_puts(m, " Device is stopped\n"); 3165 return 0; 3166 } 3167 3168 for_each_inst(xcc_id, xcc_mask) { 3169 r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev, 3170 KFD_CIK_HIQ_PIPE, 3171 KFD_CIK_HIQ_QUEUE, &dump, 3172 &n_regs, xcc_id); 3173 if (!r) { 3174 seq_printf( 3175 m, 3176 " Inst %d, HIQ on MEC %d Pipe %d Queue %d\n", 3177 xcc_id, 3178 KFD_CIK_HIQ_PIPE / get_pipes_per_mec(dqm) + 1, 3179 KFD_CIK_HIQ_PIPE % get_pipes_per_mec(dqm), 3180 KFD_CIK_HIQ_QUEUE); 3181 seq_reg_dump(m, dump, n_regs); 3182 3183 kfree(dump); 3184 } 3185 3186 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) { 3187 int pipe_offset = pipe * get_queues_per_pipe(dqm); 3188 3189 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) { 3190 if (!test_bit(pipe_offset + queue, 3191 dqm->dev->kfd->shared_resources.cp_queue_bitmap)) 3192 continue; 3193 3194 r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev, 3195 pipe, queue, 3196 &dump, &n_regs, 3197 xcc_id); 3198 if (r) 3199 break; 3200 3201 seq_printf(m, 3202 " Inst %d, CP Pipe %d, Queue %d\n", 3203 xcc_id, pipe, queue); 3204 seq_reg_dump(m, dump, n_regs); 3205 3206 kfree(dump); 3207 } 3208 } 3209 } 3210 3211 sdma_engine_start = dqm->dev->node_id * get_num_all_sdma_engines(dqm); 3212 for (pipe = sdma_engine_start; 3213 pipe < (sdma_engine_start + get_num_all_sdma_engines(dqm)); 3214 pipe++) { 3215 for (queue = 0; 3216 queue < dqm->dev->kfd->device_info.num_sdma_queues_per_engine; 3217 queue++) { 3218 r = dqm->dev->kfd2kgd->hqd_sdma_dump( 3219 dqm->dev->adev, pipe, queue, &dump, &n_regs); 3220 if (r) 3221 break; 3222 3223 seq_printf(m, " SDMA Engine %d, RLC %d\n", 3224 pipe, queue); 3225 seq_reg_dump(m, dump, n_regs); 3226 3227 kfree(dump); 3228 } 3229 } 3230 3231 return r; 3232 } 3233 3234 int dqm_debugfs_hang_hws(struct device_queue_manager *dqm) 3235 { 3236 int r = 0; 3237 3238 dqm_lock(dqm); 3239 r = pm_debugfs_hang_hws(&dqm->packet_mgr); 3240 if (r) { 3241 dqm_unlock(dqm); 3242 return r; 3243 } 3244 dqm->active_runlist = true; 3245 r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 3246 0, USE_DEFAULT_GRACE_PERIOD); 3247 dqm_unlock(dqm); 3248 3249 return r; 3250 } 3251 3252 #endif 3253