/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"
#include "amdgpu_xcp.h"

/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE		msecs_to_jiffies(100)

#define GFX_OFF_NO_DELAY 0

/*
 * GPU GFX IP block helper functions.
 */

/* Map a compute (mec, pipe, queue) triple to a flat bit index in the MEC
 * queue bitmap: queues are laid out queue-major within a pipe and
 * pipe-major within a MEC.
 */
int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
				int pipe, int queue)
{
	int bit = 0;

	bit += mec * adev->gfx.mec.num_pipe_per_mec
		* adev->gfx.mec.num_queue_per_pipe;
	bit += pipe * adev->gfx.mec.num_queue_per_pipe;
	bit += queue;

	return bit;
}

void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
					int *mec, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.mec.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
		% adev->gfx.mec.num_pipe_per_mec;
	*mec = (bit / adev->gfx.mec.num_queue_per_pipe)
	       / adev->gfx.mec.num_pipe_per_mec;
}

bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
				     int xcc_id, int mec, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
			adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
}
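/* The following three helpers are the graphics (ME) counterparts of the MEC
 * helpers above: they convert between (me, pipe, queue) triples and flat bit
 * positions in adev->gfx.me.queue_bitmap.
 */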
int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
			       int me, int pipe, int queue)
{
	int bit = 0;

	bit += me * adev->gfx.me.num_pipe_per_me
		* adev->gfx.me.num_queue_per_pipe;
	bit += pipe * adev->gfx.me.num_queue_per_pipe;
	bit += queue;

	return bit;
}

void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
				int *me, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.me.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.me.num_queue_per_pipe)
		% adev->gfx.me.num_pipe_per_me;
	*me = (bit / adev->gfx.me.num_queue_per_pipe)
	      / adev->gfx.me.num_pipe_per_me;
}

bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
				    int me, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
			adev->gfx.me.queue_bitmap);
}

/**
 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
 *
 * @mask: array in which the per-shader array disable masks will be stored
 * @max_se: number of SEs
 * @max_sh: number of SHs
 *
 * The bitmask of CUs to be disabled in the shader array determined by se and
 * sh is stored in mask[se * max_sh + sh]. The parameter is a comma-separated
 * list of se.sh.cu triples, e.g. "1.0.3,1.0.4".
 */
void amdgpu_gfx_parse_disable_cu(unsigned int *mask, unsigned int max_se, unsigned int max_sh)
{
	unsigned int se, sh, cu;
	const char *p;

	memset(mask, 0, sizeof(*mask) * max_se * max_sh);

	if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
		return;

	p = amdgpu_disable_cu;
	for (;;) {
		char *next;
		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);

		if (ret < 3) {
			DRM_ERROR("amdgpu: could not parse disable_cu\n");
			return;
		}

		if (se < max_se && sh < max_sh && cu < 16) {
			DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
			mask[se * max_sh + sh] |= 1u << cu;
		} else {
			DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
				  se, sh, cu);
		}

		next = strchr(p, ',');
		if (!next)
			break;
		p = next + 1;
	}
}

static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
{
	return amdgpu_async_gfx_ring && adev->gfx.me.num_pipe_per_me > 1;
}

static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
{
	if (amdgpu_compute_multipipe != -1) {
		DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
			 amdgpu_compute_multipipe);
		return amdgpu_compute_multipipe == 1;
	}

	if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
		return true;

	/* FIXME: spreading the queues across pipes causes perf regressions
	 * on POLARIS11 compute workloads */
	if (adev->asic_type == CHIP_POLARIS11)
		return false;

	return adev->gfx.mec.num_mec > 1;
}

bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
						struct amdgpu_ring *ring)
{
	int queue = ring->queue;
	int pipe = ring->pipe;

	/* Policy: use pipe1 queue0 as the high priority graphics queue if we
	 * have more than one gfx pipe.
	 */
	if (amdgpu_gfx_is_graphics_multipipe_capable(adev) &&
	    adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) {
		int me = ring->me;
		int bit;

		bit = amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue);
		if (ring == &adev->gfx.gfx_ring[bit])
			return true;
	}

	return false;
}

bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
					       struct amdgpu_ring *ring)
{
	/* Policy: use the first queue as the high priority compute queue if
	 * we have more than one compute queue.
	 */
	if (adev->gfx.num_compute_rings > 1 &&
	    ring == &adev->gfx.compute_ring[0])
		return true;

	return false;
}
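/**
 * amdgpu_gfx_compute_queue_acquire - mark the compute queues owned by amdgpu
 * @adev: amdgpu device
 *
 * Populates adev->gfx.mec_bitmap[] according to the compute multipipe policy;
 * bits left clear remain available to other users such as the KFD.
 */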
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
	int i, j, queue, pipe;
	bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
	int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
				     adev->gfx.mec.num_queue_per_pipe,
				     adev->gfx.num_compute_rings);
	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;

	if (multipipe_policy) {
		/* policy: spread the queues evenly across all pipes on MEC1
		 * only; for multiple XCCs, just reuse the original policy
		 * for simplicity */
		for (j = 0; j < num_xcc; j++) {
			for (i = 0; i < max_queues_per_mec; i++) {
				pipe = i % adev->gfx.mec.num_pipe_per_mec;
				queue = (i / adev->gfx.mec.num_pipe_per_mec) %
					adev->gfx.mec.num_queue_per_pipe;

				set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
					adev->gfx.mec_bitmap[j].queue_bitmap);
			}
		}
	} else {
		/* policy: amdgpu owns all queues in the given pipe */
		for (j = 0; j < num_xcc; j++) {
			for (i = 0; i < max_queues_per_mec; ++i)
				set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap);
		}
	}

	for (j = 0; j < num_xcc; j++) {
		dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
			bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
	}
}

void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, pipe;
	bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
	int max_queues_per_me = adev->gfx.me.num_pipe_per_me *
				adev->gfx.me.num_queue_per_pipe;

	if (multipipe_policy) {
		/* policy: amdgpu owns the first queue per pipe at this stage;
		 * this will be extended to multiple queues per pipe later */
		for (i = 0; i < max_queues_per_me; i++) {
			pipe = i % adev->gfx.me.num_pipe_per_me;
			queue = (i / adev->gfx.me.num_pipe_per_me) %
				adev->gfx.me.num_queue_per_pipe;

			set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue,
				adev->gfx.me.queue_bitmap);
		}
	} else {
		for (i = 0; i < max_queues_per_me; ++i)
			set_bit(i, adev->gfx.me.queue_bitmap);
	}

	/* update the number of active graphics rings */
	adev->gfx.num_gfx_rings =
		bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
}
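/* Pick a free (not amdgpu-owned) compute queue for the KIQ ring, scanning
 * from the highest queue bit downwards. On success, fills in ring->me,
 * ring->pipe and ring->queue and returns 0; returns -EINVAL if no suitable
 * queue is available.
 */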
static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring, int xcc_id)
{
	int queue_bit;
	int mec, pipe, queue;

	queue_bit = adev->gfx.mec.num_mec
		    * adev->gfx.mec.num_pipe_per_mec
		    * adev->gfx.mec.num_queue_per_pipe;

	while (--queue_bit >= 0) {
		if (test_bit(queue_bit, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
			continue;

		amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

		/*
		 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
		 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
		 *    can only be issued on queue 0.
		 */
		if ((mec == 1 && pipe > 1) || queue != 0)
			continue;

		ring->me = mec + 1;
		ring->pipe = pipe;
		ring->queue = queue;

		return 0;
	}

	dev_err(adev->dev, "Failed to find a queue for KIQ\n");
	return -EINVAL;
}

int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring,
			     struct amdgpu_irq_src *irq, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	int r = 0;

	spin_lock_init(&kiq->ring_lock);

	ring->adev = NULL;
	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->xcc_id = xcc_id;
	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
	ring->doorbell_index =
		(adev->doorbell_index.kiq +
		 xcc_id * adev->doorbell_index.xcc_doorbell_range)
		<< 1;

	r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id);
	if (r)
		return r;

	ring->eop_gpu_addr = kiq->eop_gpu_addr;
	ring->no_scheduler = true;
	sprintf(ring->name, "kiq_%d.%d.%d.%d", xcc_id, ring->me, ring->pipe, ring->queue);
	r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);

	return r;
}

void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
	amdgpu_ring_fini(ring);
}

void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];

	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
}
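/**
 * amdgpu_gfx_kiq_init - allocate and clear the KIQ EOP buffer
 * @adev: amdgpu device
 * @hpd_size: size of the EOP/HPD buffer in bytes
 * @xcc_id: XCC instance to initialize
 *
 * Returns 0 on success or a negative error code on allocation failure.
 */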
int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
			unsigned int hpd_size, int xcc_id)
{
	int r;
	u32 *hpd;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];

	r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
				    &kiq->eop_gpu_addr, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
		return r;
	}

	memset(hpd, 0, hpd_size);

	r = amdgpu_bo_reserve(kiq->eop_obj, true);
	if (unlikely(r != 0))
		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
	amdgpu_bo_kunmap(kiq->eop_obj);
	amdgpu_bo_unreserve(kiq->eop_obj);

	return 0;
}

/* create MQD for each compute/gfx queue */
int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
			   unsigned int mqd_size, int xcc_id)
{
	int r, i, j;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *ring = &kiq->ring;
	u32 domain = AMDGPU_GEM_DOMAIN_GTT;

#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
	/* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0))
		domain |= AMDGPU_GEM_DOMAIN_VRAM;
#endif

	/* create MQD for KIQ */
	if (!adev->enable_mes_kiq && !ring->mqd_obj) {
		/* Originally the KIQ MQD was placed in the GTT domain, but for
		 * SR-IOV the VRAM domain is a must; otherwise the hypervisor
		 * triggers a SAVE_VF failure after the driver is unloaded,
		 * since by then the MQD has been deallocated and GART-unbound.
		 * To avoid diverging code paths, use the VRAM domain for the
		 * KIQ MQD on both SR-IOV and bare metal.
		 */
		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM |
					    AMDGPU_GEM_DOMAIN_GTT,
					    &ring->mqd_obj,
					    &ring->mqd_gpu_addr,
					    &ring->mqd_ptr);
		if (r) {
			dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
			return r;
		}

		/* prepare MQD backup */
		kiq->mqd_backup = kmalloc(mqd_size, GFP_KERNEL);
		if (!kiq->mqd_backup) {
			dev_warn(adev->dev,
				 "no memory to create MQD backup for ring %s\n", ring->name);
			return -ENOMEM;
		}
	}

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		/* create MQD for each KGQ */
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			if (!ring->mqd_obj) {
				r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
							    domain, &ring->mqd_obj,
							    &ring->mqd_gpu_addr, &ring->mqd_ptr);
				if (r) {
					dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
					return r;
				}

				ring->mqd_size = mqd_size;
				/* prepare MQD backup */
				adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
				if (!adev->gfx.me.mqd_backup[i]) {
					dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
					return -ENOMEM;
				}
			}
		}
	}

	/* create MQD for each KCQ */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		j = i + xcc_id * adev->gfx.num_compute_rings;
		ring = &adev->gfx.compute_ring[j];
		if (!ring->mqd_obj) {
			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
						    domain, &ring->mqd_obj,
						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
			if (r) {
				dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
				return r;
			}

			ring->mqd_size = mqd_size;
			/* prepare MQD backup */
			adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL);
			if (!adev->gfx.mec.mqd_backup[j]) {
				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
				return -ENOMEM;
			}
		}
	}

	return 0;
}

void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_ring *ring = NULL;
	int i, j;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			kfree(adev->gfx.me.mqd_backup[i]);
			amdgpu_bo_free_kernel(&ring->mqd_obj,
					      &ring->mqd_gpu_addr,
					      &ring->mqd_ptr);
		}
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		j = i + xcc_id * adev->gfx.num_compute_rings;
		ring = &adev->gfx.compute_ring[j];
		kfree(adev->gfx.mec.mqd_backup[j]);
		amdgpu_bo_free_kernel(&ring->mqd_obj,
				      &ring->mqd_gpu_addr,
				      &ring->mqd_ptr);
	}

	ring = &kiq->ring;
	kfree(kiq->mqd_backup);
	amdgpu_bo_free_kernel(&ring->mqd_obj,
			      &ring->mqd_gpu_addr,
			      &ring->mqd_ptr);
}
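/**
 * amdgpu_gfx_disable_kcq - unmap the kernel compute queues via the KIQ
 * @adev: amdgpu device
 * @xcc_id: XCC instance whose compute rings should be unmapped
 *
 * Submits a RESET_QUEUES unmap packet for every compute ring on this XCC and
 * waits for the KIQ to process it (unless a job hang is in progress).
 */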
int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int i, r = 0;
	int j;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	spin_lock(&kiq->ring_lock);
	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
				adev->gfx.num_compute_rings)) {
		spin_unlock(&kiq->ring_lock);
		return -ENOMEM;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		j = i + xcc_id * adev->gfx.num_compute_rings;
		kiq->pmf->kiq_unmap_queues(kiq_ring,
					   &adev->gfx.compute_ring[j],
					   RESET_QUEUES, 0, 0);
	}

	if (kiq_ring->sched.ready && !adev->job_hang)
		r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&kiq->ring_lock);

	return r;
}

int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int i, r = 0;
	int j;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	spin_lock(&kiq->ring_lock);
	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
		if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
					adev->gfx.num_gfx_rings)) {
			spin_unlock(&kiq->ring_lock);
			return -ENOMEM;
		}

		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			j = i + xcc_id * adev->gfx.num_gfx_rings;
			kiq->pmf->kiq_unmap_queues(kiq_ring,
						   &adev->gfx.gfx_ring[j],
						   PREEMPT_QUEUES, 0, 0);
		}
	}

	if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang)
		r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&kiq->ring_lock);

	return r;
}

/* Convert a flat MEC queue bit into the bit layout expected by the KIQ
 * SET_RESOURCES packet, which assumes a fixed 4 pipes x 8 queues per MEC.
 */
int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					      int queue_bit)
{
	int mec, pipe, queue;
	int set_resource_bit = 0;

	amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

	set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;

	return set_resource_bit;
}

int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	uint64_t queue_mask = 0;
	int r, i, j;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
		return -EINVAL;

	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		if (!test_bit(i, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of queue_mask needs updating */
		if (WARN_ON(i >= (sizeof(queue_mask) * 8))) {
			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
			break;
		}

		queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
	}

	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
		 kiq_ring->queue);
	amdgpu_device_flush_hdp(adev, NULL);

	spin_lock(&kiq->ring_lock);
	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
					adev->gfx.num_compute_rings +
					kiq->pmf->set_resources_size);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		spin_unlock(&kiq->ring_lock);
		return r;
	}

	if (adev->enable_mes)
		queue_mask = ~0ULL;

	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		j = i + xcc_id * adev->gfx.num_compute_rings;
		kiq->pmf->kiq_map_queues(kiq_ring,
					 &adev->gfx.compute_ring[j]);
	}

	r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&kiq->ring_lock);
	if (r)
		DRM_ERROR("KCQ enable failed\n");

	return r;
}
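/**
 * amdgpu_gfx_enable_kgq - map the kernel graphics queues via the KIQ
 * @adev: amdgpu device
 * @xcc_id: XCC instance whose gfx rings should be mapped
 *
 * Only the master XCC maps graphics queues; on slave XCCs this just tests
 * the KIQ ring. Returns 0 on success or a negative error code.
 */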
int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int r, i, j;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
		return -EINVAL;

	amdgpu_device_flush_hdp(adev, NULL);

	spin_lock(&kiq->ring_lock);
	/* No need to map kgq on the slave */
	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
		r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
						adev->gfx.num_gfx_rings);
		if (r) {
			DRM_ERROR("Failed to lock KIQ (%d).\n", r);
			spin_unlock(&kiq->ring_lock);
			return r;
		}

		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			j = i + xcc_id * adev->gfx.num_gfx_rings;
			kiq->pmf->kiq_map_queues(kiq_ring,
						 &adev->gfx.gfx_ring[j]);
		}
	}

	r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&kiq->ring_lock);
	if (r)
		DRM_ERROR("KGQ enable failed\n");

	return r;
}

/**
 * amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
 *
 * @adev: amdgpu_device pointer
 * @enable: true to enable the gfx off feature, false to disable it
 *
 * 1. The gfx off feature will be enabled by the gfx IP after gfx CG/PG is
 *    enabled.
 * 2. Other clients can send a request to disable the gfx off feature; such
 *    requests should be honored.
 * 3. Other clients can cancel their request to disable the gfx off feature.
 * 4. Other clients should not send a request to enable the gfx off feature
 *    before disabling it.
 */
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
	unsigned long delay = GFX_OFF_DELAY_ENABLE;

	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
		return;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	if (enable) {
		/* If the count is already 0, it means there's an imbalance bug somewhere.
		 * Note that the bug may be in a different caller than the one which triggers the
		 * WARN_ON_ONCE.
		 */
		if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
			goto unlock;

		adev->gfx.gfx_off_req_count--;

		if (adev->gfx.gfx_off_req_count == 0 &&
		    !adev->gfx.gfx_off_state) {
			/* If going to s2idle, no need to wait */
			if (adev->in_s0ix) {
				if (!amdgpu_dpm_set_powergating_by_smu(adev,
						AMD_IP_BLOCK_TYPE_GFX, true))
					adev->gfx.gfx_off_state = true;
			} else {
				schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
						      delay);
			}
		}
	} else {
		if (adev->gfx.gfx_off_req_count == 0) {
			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);

			if (adev->gfx.gfx_off_state &&
			    !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
				adev->gfx.gfx_off_state = false;

				if (adev->gfx.funcs->init_spm_golden) {
					dev_dbg(adev->dev,
						"GFXOFF is disabled, re-init SPM golden settings\n");
					amdgpu_gfx_init_spm_golden(adev);
				}
			}
		}

		adev->gfx.gfx_off_req_count++;
	}

unlock:
	mutex_unlock(&adev->gfx.gfx_off_mutex);
}

int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_set_residency_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_get_residency_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_get_entrycount_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_get_status_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}
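/**
 * amdgpu_gfx_ras_late_init - late RAS init for the gfx block
 * @adev: amdgpu device
 * @ras_block: RAS block descriptor for gfx
 *
 * If RAS is supported, performs the common RAS late init and enables the
 * CP ECC error interrupt; otherwise the RAS feature is disabled on boot.
 */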
int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		if (!amdgpu_persistent_edc_harvesting_supported(adev))
			amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);

		r = amdgpu_ras_block_late_init(adev, ras_block);
		if (r)
			return r;

		if (adev->gfx.cp_ecc_error_irq.funcs) {
			r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
			if (r)
				goto late_fini;
		}
	} else {
		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
	}

	return 0;
late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}

int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev)
{
	int err = 0;
	struct amdgpu_gfx_ras *ras = NULL;

	/* adev->gfx.ras being NULL means gfx does not support RAS,
	 * so there is nothing to do here.
	 */
	if (!adev->gfx.ras)
		return 0;

	ras = adev->gfx.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register gfx ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "gfx");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->gfx.ras_if = &ras->ras_block.ras_comm;

	/* If no special ras_late_init function is defined, use the gfx default */
	if (!ras->ras_block.ras_late_init)
		ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;

	/* If no special ras_cb function is defined, use the default ras_cb */
	if (!ras->ras_block.ras_cb)
		ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;

	return 0;
}

int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
					  struct amdgpu_iv_entry *entry)
{
	if (adev->gfx.ras && adev->gfx.ras->poison_consumption_handler)
		return adev->gfx.ras->poison_consumption_handler(adev, entry);

	return 0;
}

int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
				   void *err_data,
				   struct amdgpu_iv_entry *entry)
{
	/* TODO: a UE will trigger an interrupt.
	 *
	 * When "Full RAS" is enabled, the per-IP interrupt sources should
	 * be disabled and the driver should only look for the aggregated
	 * interrupt via sync flood.
	 */
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
		if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops &&
		    adev->gfx.ras->ras_block.hw_ops->query_ras_error_count)
			adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
		amdgpu_ras_reset_gpu(adev);
	}
	return AMDGPU_RAS_SUCCESS;
}

int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->gfx.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	DRM_ERROR("CP ECC ERROR IRQ\n");
	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

/* Reset the error counters, then run @func once per XCC instance. */
void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
			       void *ras_error_status,
			       void (*func)(struct amdgpu_device *adev, void *ras_error_status,
					    int xcc_id))
{
	int i;
	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
	uint32_t xcc_mask = GENMASK(num_xcc - 1, 0);
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	if (err_data) {
		err_data->ue_count = 0;
		err_data->ce_count = 0;
	}

	for_each_inst(i, xcc_mask)
		func(adev, ras_error_status, i);
}
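/**
 * amdgpu_kiq_rreg - read a register through the KIQ (or MES) ring
 * @adev: amdgpu device
 * @reg: dword offset of the register to read
 *
 * Used when direct MMIO access is not possible, e.g. under SR-IOV.
 * Returns the register value, or ~0 on failure.
 */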
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq, reg_val_offs = 0, value = 0;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
	struct amdgpu_ring *ring = &kiq->ring;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (adev->mes.ring.sched.ready)
		return amdgpu_mes_rreg(adev, reg);

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
		pr_err("critical bug! too many kiq readers\n");
		goto failed_unlock;
	}
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* Don't wait any longer in the GPU-reset case, because doing so may
	 * block the gpu_recover() routine forever. E.g. this virt_kiq_rreg
	 * is triggered from TTM, and ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting here, which makes gpu_recover()
	 * hang.
	 *
	 * Also don't wait any longer in IRQ context.
	 */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_read;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_read;

	mb();
	value = adev->wb.wb[reg_val_offs];
	amdgpu_device_wb_free(adev, reg_val_offs);
	return value;

failed_undo:
	amdgpu_ring_undo(ring);
failed_unlock:
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
	if (reg_val_offs)
		amdgpu_device_wb_free(adev, reg_val_offs);
	dev_err(adev->dev, "failed to read reg:%x\n", reg);
	return ~0;
}
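/**
 * amdgpu_kiq_wreg - write a register through the KIQ (or MES) ring
 * @adev: amdgpu device
 * @reg: dword offset of the register to write
 * @v: value to write
 */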
void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (adev->mes.ring.sched.ready) {
		amdgpu_mes_wreg(adev, reg, v);
		return;
	}

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* Don't wait any longer in the GPU-reset case, because doing so may
	 * block the gpu_recover() routine forever. E.g. this virt_kiq_wreg
	 * is triggered from TTM, and ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting here, which makes gpu_recover()
	 * hang.
	 *
	 * Also don't wait any longer in IRQ context.
	 */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_write;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_write;

	return;

failed_undo:
	amdgpu_ring_undo(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_write:
	dev_err(adev->dev, "failed to write reg:%x\n", reg);
}

int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
{
	if (amdgpu_num_kcq == -1) {
		return 8;
	} else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
		dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
		return 8;
	}
	return amdgpu_num_kcq;
}
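/**
 * amdgpu_gfx_cp_init_microcode - parse a CP firmware header and register it
 * @adev: amdgpu device
 * @ucode_id: which CP firmware (PFP/ME/CE/MEC, legacy or RS64 variants)
 *
 * Records the firmware and feature versions from the header and, for PSP
 * front-door loading, adds the firmware to the ucode list so its size is
 * accounted for.
 */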
void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
				  uint32_t ucode_id)
{
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
	struct amdgpu_firmware_info *info = NULL;
	const struct firmware *ucode_fw;
	unsigned int fw_size;

	switch (ucode_id) {
	case AMDGPU_UCODE_ID_CP_PFP:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.pfp_fw->data;
		adev->gfx.pfp_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.pfp_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.pfp_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_PFP:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.pfp_fw->data;
		adev->gfx.pfp_fw_version =
			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.pfp_feature_version =
			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
		ucode_fw = adev->gfx.pfp_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.pfp_fw->data;
		ucode_fw = adev->gfx.pfp_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_ME:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.me_fw->data;
		adev->gfx.me_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.me_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.me_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_ME:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.me_fw->data;
		adev->gfx.me_fw_version =
			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.me_feature_version =
			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
		ucode_fw = adev->gfx.me_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.me_fw->data;
		ucode_fw = adev->gfx.me_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_CE:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.ce_fw->data;
		adev->gfx.ce_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.ce_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.ce_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_MEC1:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec_fw->data;
		adev->gfx.mec_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
			  le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_MEC1_JT:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec_fw->data;
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.mec2_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
			  le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2_JT:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		ucode_fw = adev->gfx.mec2_fw;
		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_RS64_MEC:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.mec_fw->data;
		adev->gfx.mec_fw_version =
			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.mec_feature_version =
			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.mec_fw->data;
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
		break;
	default:
		/* bail out instead of registering uninitialized
		 * ucode_fw/fw_size below */
		dev_err(adev->dev, "Invalid ucode id %u\n", ucode_id);
		return;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[ucode_id];
		info->ucode_id = ucode_id;
		info->fw = ucode_fw;
		adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
	}
}
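/**
 * amdgpu_gfx_is_master_xcc - check whether an XCC is the master of its partition
 * @adev: amdgpu device
 * @xcc_id: XCC instance to check
 *
 * The master is the first XCC within each group of num_xcc_per_xcp instances.
 */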
bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id)
{
	return !(xcc_id % (adev->gfx.num_xcc_per_xcp ?
			adev->gfx.num_xcc_per_xcp : 1));
}

static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
						struct device_attribute *addr,
						char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int mode;

	mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
					       AMDGPU_XCP_FL_NONE);

	return sysfs_emit(buf, "%s\n", amdgpu_gfx_compute_mode_desc(mode));
}

static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
						struct device_attribute *addr,
						const char *buf, size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amdgpu_gfx_partition mode;
	int ret = 0, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	if (num_xcc % 2 != 0)
		return -EINVAL;

	if (!strncasecmp("SPX", buf, strlen("SPX"))) {
		mode = AMDGPU_SPX_PARTITION_MODE;
	} else if (!strncasecmp("DPX", buf, strlen("DPX"))) {
		/*
		 * DPX mode needs AIDs in multiples of 2.
		 * Each AID connects 2 XCCs.
		 */
		if (num_xcc % 4)
			return -EINVAL;
		mode = AMDGPU_DPX_PARTITION_MODE;
	} else if (!strncasecmp("TPX", buf, strlen("TPX"))) {
		if (num_xcc != 6)
			return -EINVAL;
		mode = AMDGPU_TPX_PARTITION_MODE;
	} else if (!strncasecmp("QPX", buf, strlen("QPX"))) {
		if (num_xcc != 8)
			return -EINVAL;
		mode = AMDGPU_QPX_PARTITION_MODE;
	} else if (!strncasecmp("CPX", buf, strlen("CPX"))) {
		mode = AMDGPU_CPX_PARTITION_MODE;
	} else {
		return -EINVAL;
	}

	ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);

	if (ret)
		return ret;

	return count;
}

static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
						struct device_attribute *addr,
						char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	char *supported_partition;

	/* TBD */
	switch (NUM_XCC(adev->gfx.xcc_mask)) {
	case 8:
		supported_partition = "SPX, DPX, QPX, CPX";
		break;
	case 6:
		supported_partition = "SPX, TPX, CPX";
		break;
	case 4:
		supported_partition = "SPX, DPX, CPX";
		break;
	/* this seems to exist only in the emulation phase */
	case 2:
		supported_partition = "SPX, CPX";
		break;
	default:
		supported_partition = "Not supported";
		break;
	}

	return sysfs_emit(buf, "%s\n", supported_partition);
}

static DEVICE_ATTR(current_compute_partition, 0644,
		   amdgpu_gfx_get_current_compute_partition,
		   amdgpu_gfx_set_compute_partition);

static DEVICE_ATTR(available_compute_partition, 0444,
		   amdgpu_gfx_get_available_compute_partition, NULL);

int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
{
	int r;

	r = device_create_file(adev->dev, &dev_attr_current_compute_partition);
	if (r)
		return r;

	r = device_create_file(adev->dev, &dev_attr_available_compute_partition);

	return r;
}

void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
{
	device_remove_file(adev->dev, &dev_attr_current_compute_partition);
	device_remove_file(adev->dev, &dev_attr_available_compute_partition);
}