/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

int amdgpu_mes_alloc_process_doorbells(struct amdgpu_device *adev,
				       unsigned int *doorbell_index)
{
	int r = ida_simple_get(&adev->mes.doorbell_ida, 2,
			       adev->mes.max_doorbell_slices,
			       GFP_KERNEL);
	if (r > 0)
		*doorbell_index = r;

	return r;
}

void amdgpu_mes_free_process_doorbells(struct amdgpu_device *adev,
				       unsigned int doorbell_index)
{
	if (doorbell_index)
		ida_simple_remove(&adev->mes.doorbell_ida, doorbell_index);
}

unsigned int amdgpu_mes_get_doorbell_dw_offset_in_bar(
					struct amdgpu_device *adev,
					uint32_t doorbell_index,
					unsigned int doorbell_id)
{
	return ((doorbell_index *
		 amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32) +
		doorbell_id * 2);
}

static int amdgpu_mes_queue_doorbell_get(struct amdgpu_device *adev,
					 struct amdgpu_mes_process *process,
					 int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;

	if (ip_type == AMDGPU_RING_TYPE_SDMA) {
		offset = adev->doorbell_index.sdma_engine[0];
		found = find_next_zero_bit(process->doorbell_bitmap,
					   AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
					   offset);
	} else {
		found = find_first_zero_bit(process->doorbell_bitmap,
					    AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS);
	}

	if (found >= AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, process->doorbell_bitmap);

	*doorbell_index = amdgpu_mes_get_doorbell_dw_offset_in_bar(adev,
				process->doorbell_index, found);

	return 0;
}

static void amdgpu_mes_queue_doorbell_free(struct amdgpu_device *adev,
					   struct amdgpu_mes_process *process,
					   uint32_t doorbell_index)
{
	unsigned int old, doorbell_id;

	doorbell_id = doorbell_index -
		      (process->doorbell_index *
		       amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32);
	doorbell_id /= 2;

	old = test_and_clear_bit(doorbell_id, process->doorbell_bitmap);
	WARN_ON(!old);
}
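
/*
 * Doorbell BAR layout used by MES: the first page-aligned range above the
 * last statically assigned doorbell holds the per-priority aggregated
 * doorbells; above that, each MES process gets one page-aligned slice of
 * AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 64-bit queue doorbells.
 */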
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	size_t doorbell_start_offset;
	size_t doorbell_aperture_size;
	size_t doorbell_process_limit;
	size_t aggregated_doorbell_start;
	int i;

	aggregated_doorbell_start = (adev->doorbell_index.max_assignment + 1) * sizeof(u32);
	aggregated_doorbell_start =
		roundup(aggregated_doorbell_start, PAGE_SIZE);

	doorbell_start_offset = aggregated_doorbell_start + PAGE_SIZE;
	doorbell_start_offset =
		roundup(doorbell_start_offset,
			amdgpu_mes_doorbell_process_slice(adev));

	doorbell_aperture_size = adev->doorbell.size;
	doorbell_aperture_size =
		rounddown(doorbell_aperture_size,
			  amdgpu_mes_doorbell_process_slice(adev));

	if (doorbell_aperture_size > doorbell_start_offset)
		doorbell_process_limit =
			(doorbell_aperture_size - doorbell_start_offset) /
			amdgpu_mes_doorbell_process_slice(adev);
	else
		return -ENOSPC;

	adev->mes.doorbell_id_offset = doorbell_start_offset / sizeof(u32);
	adev->mes.max_doorbell_slices = doorbell_process_limit;

	/* allocate Qword range for aggregated doorbell */
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++)
		adev->mes.aggregated_doorbells[i] =
			aggregated_doorbell_start / sizeof(u32) + i * 2;

	DRM_INFO("max_doorbell_slices=%zu\n", doorbell_process_limit);
	return 0;
}
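
/*
 * One-time MES software init: set up the pasid/gang/queue ID allocators,
 * the HQD masks handed to the MES firmware, the writeback slots used for
 * the scheduler context, query-status fences and register reads, and the
 * doorbell layout.
 */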
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	spin_lock_init(&adev->mes.ring_lock);
	mutex_init(&adev->mes.mutex_hidden);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= 4)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) sch_ctx_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
		dev_err(adev->dev,
			"(%d) read_val_offs alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	return 0;

error:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}
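
/*
 * An MES process aggregates all gangs created for one pasid/VM; it owns the
 * per-process doorbell slice and the process context BO handed to the
 * firmware scheduler.
 */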
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	process->doorbell_bitmap =
		kzalloc(DIV_ROUND_UP(AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
				     BITS_PER_BYTE), GFP_KERNEL);
	if (!process->doorbell_bitmap) {
		DRM_ERROR("failed to allocate doorbell bitmap\n");
		kfree(process);
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to insert pasid %d into idr\n", pasid);
		/* drop the MES lock here; clean_up_ctx runs unlocked */
		amdgpu_mes_unlock(&adev->mes);
		goto clean_up_ctx;
	}

	/* allocate the starting doorbell index of the process */
	r = amdgpu_mes_alloc_process_doorbells(adev, &process->doorbell_index);
	if (r < 0) {
		DRM_ERROR("failed to allocate doorbell for process\n");
		goto clean_up_pasid;
	}

	DRM_DEBUG("process doorbell index = %d\n", process->doorbell_index);

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_pasid:
	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);
clean_up_ctx:
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process->doorbell_bitmap);
	kfree(process);
	return r;
}

void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	amdgpu_mes_free_process_doorbells(adev, process->doorbell_index);
	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process->doorbell_bitmap);
	kfree(process);
}
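
/*
 * A gang is a group of queues belonging to one process that the firmware
 * schedules as a unit; its context BO is referenced by every queue added
 * to it.
 */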
int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}
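
/*
 * Suspend/resume walk every gang of every known process and ask the MES
 * firmware to take the corresponding hardware queues off their pipes, or
 * map them back.
 */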
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d\n",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d\n",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)\n", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	/* keep the MQD BO reserved until amdgpu_mes_queue_init_mqd() */
	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	amdgpu_bo_unreserve(q->mqd_obj);
}
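
/*
 * Adding a hardware queue: allocate and reserve the MQD BO, take a queue ID
 * and a doorbell from the owning process, initialize the MQD, then hand the
 * whole queue description to the MES firmware. amdgpu_mes_self_test() below
 * shows the full create_process/add_gang/add_ring sequence built on top of
 * this.
 */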
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_queue_doorbell_get(adev, gang->process,
					  qprops->queue_type,
					  &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_queue_doorbell_free(adev, gang->process,
				       qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}
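
/*
 * Tear-down mirrors amdgpu_mes_add_hw_queue(): drop the queue ID, ask the
 * firmware to remove the hardware queue, then release the doorbell and MQD.
 */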
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_queue_doorbell_free(adev, gang->process,
				       queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}
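
/* Write reg0, then poll reg1 until (reg1 & mask) == ref, via a MES misc op. */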
int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}

int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				   uint64_t process_context_addr,
				   uint32_t spi_gdbg_per_vmid_cntl,
				   const uint32_t *tcp_watch_cntl,
				   uint32_t flags,
				   bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;
	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
	       sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
	     AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)
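
/*
 * Resolve the byte offset of a per-ring slot inside struct
 * amdgpu_mes_ctx_meta_data for the ring's engine type.
 */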
int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	/* the MES lock was already dropped above, don't unlock again */
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						  enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}
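
/*
 * The ctx meta data BO backs the rings, IBs and per-ring slots used by MES
 * test queues; it is mapped into the process VM before any ring is added.
 */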
int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
				    sizeof(struct amdgpu_mes_ctx_meta_data),
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &ctx_data->meta_data_obj,
				    &ctx_data->meta_data_mc_addr,
				    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);

	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}
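
/*
 * Unmapping waits for the last page-table clear to complete so that the
 * meta data BO can be freed safely afterwards.
 */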
int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct dma_fence *fence;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				   &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	drm_exec_fini(&exec);

	return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
						  int pasid, int *gang_id,
						  int queue_type, int num_queue,
						  struct amdgpu_ring **added_rings,
						  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		}
		DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}
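
/*
 * Self test: create a temporary VM and process, spawn one gang per queue
 * type with a single queue each, then run ring and IB tests on every queue.
 */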
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
				 { AMDGPU_RING_TYPE_SDMA, 1} };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!\n");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm, -1);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3, the firmware does not support mapping SDMA queues. */
		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0) &&
		    adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}
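
/*
 * Fetch the MES firmware for the given pipe. On GC 11+ the versioned names
 * (*_mes_2.bin for the scheduler pipe, *_mes1.bin for the KIQ pipe) are
 * tried first, with a fallback to the legacy *_mes.bin for the scheduler
 * pipe.
 */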
"" : "1"); 1481 } 1482 1483 r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name); 1484 if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) { 1485 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin", 1486 ucode_prefix); 1487 DRM_INFO("try to fall back to %s\n", fw_name); 1488 r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], 1489 fw_name); 1490 } 1491 1492 if (r) 1493 goto out; 1494 1495 mes_hdr = (const struct mes_firmware_header_v1_0 *) 1496 adev->mes.fw[pipe]->data; 1497 adev->mes.uc_start_addr[pipe] = 1498 le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) | 1499 ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32); 1500 adev->mes.data_start_addr[pipe] = 1501 le32_to_cpu(mes_hdr->mes_data_start_addr_lo) | 1502 ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32); 1503 1504 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 1505 int ucode, ucode_data; 1506 1507 if (pipe == AMDGPU_MES_SCHED_PIPE) { 1508 ucode = AMDGPU_UCODE_ID_CP_MES; 1509 ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA; 1510 } else { 1511 ucode = AMDGPU_UCODE_ID_CP_MES1; 1512 ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA; 1513 } 1514 1515 info = &adev->firmware.ucode[ucode]; 1516 info->ucode_id = ucode; 1517 info->fw = adev->mes.fw[pipe]; 1518 adev->firmware.fw_size += 1519 ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes), 1520 PAGE_SIZE); 1521 1522 info = &adev->firmware.ucode[ucode_data]; 1523 info->ucode_id = ucode_data; 1524 info->fw = adev->mes.fw[pipe]; 1525 adev->firmware.fw_size += 1526 ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes), 1527 PAGE_SIZE); 1528 } 1529 1530 return 0; 1531 out: 1532 amdgpu_ucode_release(&adev->mes.fw[pipe]); 1533 return r; 1534 } 1535