/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"

/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE         msecs_to_jiffies(100)

#define GFX_OFF_NO_DELAY 0

/*
 * GPU GFX IP block helper functions.
 */

int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
				int pipe, int queue)
{
	int bit = 0;

	bit += mec * adev->gfx.mec.num_pipe_per_mec
		* adev->gfx.mec.num_queue_per_pipe;
	bit += pipe * adev->gfx.mec.num_queue_per_pipe;
	bit += queue;

	return bit;
}

void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
					int *mec, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.mec.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
		% adev->gfx.mec.num_pipe_per_mec;
	*mec = (bit / adev->gfx.mec.num_queue_per_pipe)
	       / adev->gfx.mec.num_pipe_per_mec;
}

bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
				     int mec, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
			adev->gfx.mec.queue_bitmap);
}

int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
			       int me, int pipe, int queue)
{
	int bit = 0;

	bit += me * adev->gfx.me.num_pipe_per_me
		* adev->gfx.me.num_queue_per_pipe;
	bit += pipe * adev->gfx.me.num_queue_per_pipe;
	bit += queue;

	return bit;
}

void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
				int *me, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.me.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.me.num_queue_per_pipe)
		% adev->gfx.me.num_pipe_per_me;
	*me = (bit / adev->gfx.me.num_queue_per_pipe)
	      / adev->gfx.me.num_pipe_per_me;
}

bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
				    int me, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
			adev->gfx.me.queue_bitmap);
}
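
/*
 * Worked example of the (mec, pipe, queue) <-> bit mapping above (a sketch
 * with assumed topology values, not read from real hardware): with
 * num_pipe_per_mec = 4 and num_queue_per_pipe = 8, queue 3 on pipe 2 of
 * MEC 1 encodes to bit 1 * (4 * 8) + 2 * 8 + 3 = 51, and decoding bit 51
 * gives queue = 51 % 8 = 3, pipe = (51 / 8) % 4 = 2, mec = (51 / 8) / 4 = 1.
 */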

/**
 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
 *
 * @mask: array in which the per-shader array disable masks will be stored
 * @max_se: number of SEs
 * @max_sh: number of SHs
 *
 * The bitmask of CUs to be disabled in the shader array determined by se and
 * sh is stored in mask[se * max_sh + sh].
 */
void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
{
	unsigned se, sh, cu;
	const char *p;

	memset(mask, 0, sizeof(*mask) * max_se * max_sh);

	if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
		return;

	p = amdgpu_disable_cu;
	for (;;) {
		char *next;
		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);

		if (ret < 3) {
			DRM_ERROR("amdgpu: could not parse disable_cu\n");
			return;
		}

		if (se < max_se && sh < max_sh && cu < 16) {
			DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
			mask[se * max_sh + sh] |= 1u << cu;
		} else {
			DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
				  se, sh, cu);
		}

		next = strchr(p, ',');
		if (!next)
			break;
		p = next + 1;
	}
}
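
/*
 * Example (illustrative): booting with amdgpu.disable_cu=2.1.3,2.1.4 walks
 * the comma-separated "se.sh.cu" list above and sets bits 3 and 4 in
 * mask[2 * max_sh + 1], i.e. CUs 3 and 4 of shader array (se=2, sh=1) are
 * disabled, provided those indices are in range.
 */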

static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
{
	return amdgpu_async_gfx_ring && adev->gfx.me.num_pipe_per_me > 1;
}

static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
{
	if (amdgpu_compute_multipipe != -1) {
		DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
			 amdgpu_compute_multipipe);
		return amdgpu_compute_multipipe == 1;
	}

	/* FIXME: spreading the queues across pipes causes perf regressions
	 * on POLARIS11 compute workloads */
	if (adev->asic_type == CHIP_POLARIS11)
		return false;

	return adev->gfx.mec.num_mec > 1;
}

bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
						struct amdgpu_ring *ring)
{
	int queue = ring->queue;
	int pipe = ring->pipe;

	/* Policy: use pipe1 queue0 as the high priority graphics queue if we
	 * have more than one gfx pipe.
	 */
	if (amdgpu_gfx_is_graphics_multipipe_capable(adev) &&
	    adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) {
		int me = ring->me;
		int bit;

		bit = amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue);
		if (ring == &adev->gfx.gfx_ring[bit])
			return true;
	}

	return false;
}

bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
					       struct amdgpu_ring *ring)
{
	/* Policy: use the 1st queue as the high priority compute queue if we
	 * have more than one compute queue.
	 */
	if (adev->gfx.num_compute_rings > 1 &&
	    ring == &adev->gfx.compute_ring[0])
		return true;

	return false;
}

void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, pipe;
	bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
	int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
				     adev->gfx.mec.num_queue_per_pipe,
				     adev->gfx.num_compute_rings);

	if (multipipe_policy) {
		/* policy: spread the queues evenly across all pipes on MEC1 only */
		for (i = 0; i < max_queues_per_mec; i++) {
			pipe = i % adev->gfx.mec.num_pipe_per_mec;
			queue = (i / adev->gfx.mec.num_pipe_per_mec) %
				adev->gfx.mec.num_queue_per_pipe;

			set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
				adev->gfx.mec.queue_bitmap);
		}
	} else {
		/* policy: amdgpu owns all queues in the given pipe */
		for (i = 0; i < max_queues_per_mec; ++i)
			set_bit(i, adev->gfx.mec.queue_bitmap);
	}

	dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
}

void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, pipe;
	bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
	int max_queues_per_me = adev->gfx.me.num_pipe_per_me *
				adev->gfx.me.num_queue_per_pipe;

	if (multipipe_policy) {
		/* policy: amdgpu owns the first queue per pipe at this stage;
		 * will extend to multiple queues per pipe later */
		for (i = 0; i < max_queues_per_me; i++) {
			pipe = i % adev->gfx.me.num_pipe_per_me;
			queue = (i / adev->gfx.me.num_pipe_per_me) %
				adev->gfx.me.num_queue_per_pipe;

			set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue,
				adev->gfx.me.queue_bitmap);
		}
	} else {
		for (i = 0; i < max_queues_per_me; ++i)
			set_bit(i, adev->gfx.me.queue_bitmap);
	}

	/* update the number of active graphics rings */
	adev->gfx.num_gfx_rings =
		bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
}
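
/*
 * Illustration of the multipipe spreading above (assumed topology values,
 * for the sake of the example): with num_pipe_per_mec = 4,
 * num_queue_per_pipe = 8 and 8 compute rings, the loop claims (pipe, queue)
 * pairs in the order (0,0) (1,0) (2,0) (3,0) (0,1) (1,1) (2,1) (3,1), so
 * work lands on all four MEC1 pipes before a second queue on any pipe is
 * used.
 */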

static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring)
{
	int queue_bit;
	int mec, pipe, queue;

	queue_bit = adev->gfx.mec.num_mec
		    * adev->gfx.mec.num_pipe_per_mec
		    * adev->gfx.mec.num_queue_per_pipe;

	while (--queue_bit >= 0) {
		if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
			continue;

		amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

		/*
		 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
		 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
		 *    can only be issued on queue 0.
		 */
		if ((mec == 1 && pipe > 1) || queue != 0)
			continue;

		ring->me = mec + 1;
		ring->pipe = pipe;
		ring->queue = queue;

		return 0;
	}

	dev_err(adev->dev, "Failed to find a queue for KIQ\n");
	return -EINVAL;
}

int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring,
			     struct amdgpu_irq_src *irq)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	int r = 0;

	spin_lock_init(&kiq->ring_lock);

	ring->adev = NULL;
	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.kiq;

	r = amdgpu_gfx_kiq_acquire(adev, ring);
	if (r)
		return r;

	ring->eop_gpu_addr = kiq->eop_gpu_addr;
	ring->no_scheduler = true;
	sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
	r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);

	return r;
}

void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
	amdgpu_ring_fini(ring);
}

void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
}

int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
			unsigned hpd_size)
{
	int r;
	u32 *hpd;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
				    &kiq->eop_gpu_addr, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
		return r;
	}

	memset(hpd, 0, hpd_size);

	r = amdgpu_bo_reserve(kiq->eop_obj, true);
	if (unlikely(r != 0))
		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
	amdgpu_bo_kunmap(kiq->eop_obj);
	amdgpu_bo_unreserve(kiq->eop_obj);

	return 0;
}
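
/*
 * Example of the KIQ queue search in amdgpu_gfx_kiq_acquire() above (assumed
 * topology, illustrative only): with 2 MECs, 4 pipes per MEC and 8 queues
 * per pipe there are 64 queue bits. The search walks from bit 63 downward,
 * skips any bit already claimed in mec.queue_bitmap for kernel compute
 * rings, rejects MEC2 pipes 2/3 (mec == 1 && pipe > 1) and any queue other
 * than 0, so the KIQ ends up on queue 0 of a pipe that amdgpu does not own.
 */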

/* create MQD for each compute/gfx queue */
int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
			   unsigned mqd_size)
{
	struct amdgpu_ring *ring = NULL;
	int r, i;

	/* create MQD for KIQ */
	ring = &adev->gfx.kiq.ring;
	if (!adev->enable_mes_kiq && !ring->mqd_obj) {
		/* Originally the KIQ MQD was placed in the GTT domain, but for
		 * SRIOV the VRAM domain is a must; otherwise the hypervisor
		 * triggers a SAVE_VF failure after the driver is unloaded,
		 * because by then the MQD has been deallocated and the GART
		 * unbound. To avoid divergence, use the VRAM domain for the
		 * KIQ MQD on both SRIOV and bare metal.
		 */
		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
					    &ring->mqd_gpu_addr, &ring->mqd_ptr);
		if (r) {
			dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
			return r;
		}

		/* prepare MQD backup */
		adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL);
		if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
			dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
	}

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		/* create MQD for each KGQ */
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			if (!ring->mqd_obj) {
				r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
							    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
							    &ring->mqd_gpu_addr, &ring->mqd_ptr);
				if (r) {
					dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
					return r;
				}

				/* prepare MQD backup */
				adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
				if (!adev->gfx.me.mqd_backup[i])
					dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
			}
		}
	}

	/* create MQD for each KCQ */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		if (!ring->mqd_obj) {
			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
			if (r) {
				dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
				return r;
			}

			/* prepare MQD backup */
			adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
			if (!adev->gfx.mec.mqd_backup[i])
				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
		}
	}

	return 0;
}

void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int i;

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			kfree(adev->gfx.me.mqd_backup[i]);
			amdgpu_bo_free_kernel(&ring->mqd_obj,
					      &ring->mqd_gpu_addr,
					      &ring->mqd_ptr);
		}
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		kfree(adev->gfx.mec.mqd_backup[i]);
		amdgpu_bo_free_kernel(&ring->mqd_obj,
				      &ring->mqd_gpu_addr,
				      &ring->mqd_ptr);
	}

	ring = &adev->gfx.kiq.ring;
	kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
	amdgpu_bo_free_kernel(&ring->mqd_obj,
			      &ring->mqd_gpu_addr,
			      &ring->mqd_ptr);
}
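
/*
 * Layout note (derived from the code above): compute ring i keeps its MQD
 * backup in mec.mqd_backup[i] for i < num_compute_rings, while the KIQ
 * reuses the one slot past the end, mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS].
 * The init and fini paths above use the same indexing, so both must be
 * updated together if the scheme ever changes.
 */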

int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int i, r = 0;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	spin_lock(&adev->gfx.kiq.ring_lock);
	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
			      adev->gfx.num_compute_rings)) {
		spin_unlock(&adev->gfx.kiq.ring_lock);
		return -ENOMEM;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
					   RESET_QUEUES, 0, 0);

	if (adev->gfx.kiq.ring.sched.ready && !adev->job_hang)
		r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&adev->gfx.kiq.ring_lock);

	return r;
}

int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					      int queue_bit)
{
	int mec, pipe, queue;
	int set_resource_bit = 0;

	amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

	set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;

	return set_resource_bit;
}

int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	uint64_t queue_mask = 0;
	int r, i;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
		return -EINVAL;

	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of queue_mask needs updating */
		if (WARN_ON(i >= (sizeof(queue_mask) * 8))) {
			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
			break;
		}

		queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
	}

	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
		 kiq_ring->queue);
	spin_lock(&adev->gfx.kiq.ring_lock);
	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
			      adev->gfx.num_compute_rings +
			      kiq->pmf->set_resources_size);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		return r;
	}

	if (adev->enable_mes)
		queue_mask = ~0ULL;

	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);

	r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&adev->gfx.kiq.ring_lock);
	if (r)
		DRM_ERROR("KCQ enable failed\n");

	return r;
}
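
/*
 * Worked example for the SET_RESOURCES mapping above: the hard-coded 4 * 8
 * is the fixed 4-pipe x 8-queue per-MEC layout of the packet's queue mask,
 * independent of the driver's actual configuration. With an assumed
 * topology of 2 pipes per MEC and 8 queues per pipe, local queue bit 16
 * decodes to mec=1, pipe=0, queue=0 and therefore becomes resource bit
 * 1 * 32 + 0 * 8 + 0 = 32 in queue_mask.
 */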

/**
 * amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
 *
 * @adev: amdgpu_device pointer
 * @enable: true to enable the gfx off feature, false to disable it
 *
 * 1. The gfx off feature will be enabled by the gfx IP after gfx CG/PG has
 *    been enabled.
 * 2. Other clients can send a request to disable the gfx off feature; the
 *    request should be honored.
 * 3. Other clients can cancel their request to disable the gfx off feature.
 * 4. Other clients should not send an enable request before a corresponding
 *    disable request.
 */
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
	unsigned long delay = GFX_OFF_DELAY_ENABLE;

	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
		return;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	if (enable) {
		/* If the count is already 0, it means there's an imbalance bug
		 * somewhere. Note that the bug may be in a different caller
		 * than the one which triggers the WARN_ON_ONCE.
		 */
		if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
			goto unlock;

		adev->gfx.gfx_off_req_count--;

		if (adev->gfx.gfx_off_req_count == 0 &&
		    !adev->gfx.gfx_off_state) {
			/* If going to s2idle, no need to wait */
			if (adev->in_s0ix) {
				if (!amdgpu_dpm_set_powergating_by_smu(adev,
						AMD_IP_BLOCK_TYPE_GFX, true))
					adev->gfx.gfx_off_state = true;
			} else {
				schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
						      delay);
			}
		}
	} else {
		if (adev->gfx.gfx_off_req_count == 0) {
			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);

			if (adev->gfx.gfx_off_state &&
			    !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
				adev->gfx.gfx_off_state = false;

				if (adev->gfx.funcs->init_spm_golden) {
					dev_dbg(adev->dev,
						"GFXOFF is disabled, re-init SPM golden settings\n");
					amdgpu_gfx_init_spm_golden(adev);
				}
			}
		}

		adev->gfx.gfx_off_req_count++;
	}

unlock:
	mutex_unlock(&adev->gfx.gfx_off_mutex);
}
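
/*
 * Illustrative usage of the request counting above (a sketch, not a caller
 * in this file): code that must touch GFX registers while GFXOFF may be
 * armed brackets the access with a disable/enable pair; the feature is only
 * re-armed once every outstanding disable request has been dropped:
 *
 *	amdgpu_gfx_off_ctrl(adev, false);	// gfx_off_req_count++
 *	...					// GFX stays powered here
 *	amdgpu_gfx_off_ctrl(adev, true);	// may re-arm GFXOFF after delay
 */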

int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_set_residency_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_get_residency_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_get_entrycount_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_get_status_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		if (!amdgpu_persistent_edc_harvesting_supported(adev))
			amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);

		r = amdgpu_ras_block_late_init(adev, ras_block);
		if (r)
			return r;

		r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
		if (r)
			goto late_fini;
	} else {
		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
	}

	return 0;
late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}

int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
				   void *err_data,
				   struct amdgpu_iv_entry *entry)
{
	/* TODO: a UE will trigger an interrupt.
	 *
	 * When "Full RAS" is enabled, the per-IP interrupt sources should
	 * be disabled and the driver should only look for the aggregated
	 * interrupt via sync flood.
	 */
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
		if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops &&
		    adev->gfx.ras->ras_block.hw_ops->query_ras_error_count)
			adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
		amdgpu_ras_reset_gpu(adev);
	}
	return AMDGPU_RAS_SUCCESS;
}

int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->gfx.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	DRM_ERROR("CP ECC ERROR IRQ\n");
	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}
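
/*
 * The two helpers below route register reads/writes through the KIQ ring
 * instead of direct MMIO, for cases where the CPU cannot safely touch the
 * register itself (e.g. under SR-IOV virtualization). The request is
 * emitted as ring packets and the caller polls the accompanying fence
 * until the CP has executed it.
 */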

uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq, reg_val_offs = 0, value = 0;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (adev->mes.ring.sched.ready)
		return amdgpu_mes_rreg(adev, reg);

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
		pr_err("critical bug! too many kiq readers\n");
		goto failed_unlock;
	}
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* Don't retry during a GPU reset, since that may block the
	 * gpu_recover() routine forever: e.g. this kiq rreg can be
	 * triggered from TTM, and ttm_bo_lock_delayed_workqueue() would
	 * never return if we kept waiting here, hanging gpu_recover().
	 *
	 * Also don't retry in IRQ context, where we cannot sleep.
	 */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_read;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_read;

	mb();
	value = adev->wb.wb[reg_val_offs];
	amdgpu_device_wb_free(adev, reg_val_offs);
	return value;

failed_undo:
	amdgpu_ring_undo(ring);
failed_unlock:
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
	if (reg_val_offs)
		amdgpu_device_wb_free(adev, reg_val_offs);
	dev_err(adev->dev, "failed to read reg:%x\n", reg);
	return ~0;
}
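
/*
 * The write path below mirrors amdgpu_kiq_rreg() minus the writeback slot:
 * since no value needs to come back, there is no amdgpu_device_wb_get()/
 * amdgpu_device_wb_free() pair, only the packet emission and the same
 * fence-poll/retry logic.
 */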

void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (adev->mes.ring.sched.ready) {
		amdgpu_mes_wreg(adev, reg, v);
		return;
	}

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* Don't retry during a GPU reset, since that may block the
	 * gpu_recover() routine forever: e.g. this kiq wreg can be
	 * triggered from TTM, and ttm_bo_lock_delayed_workqueue() would
	 * never return if we kept waiting here, hanging gpu_recover().
	 *
	 * Also don't retry in IRQ context, where we cannot sleep.
	 */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_write;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_write;

	return;

failed_undo:
	amdgpu_ring_undo(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_write:
	dev_err(adev->dev, "failed to write reg:%x\n", reg);
}

int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
{
	if (amdgpu_num_kcq == -1) {
		return 8;
	} else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
		dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
		return 8;
	}
	return amdgpu_num_kcq;
}
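
/*
 * Example (illustrative): amdgpu.num_kcq=2 on the kernel command line yields
 * 2 kernel compute queues, amdgpu.num_kcq=12 is rejected with the warning
 * above and falls back to 8, and leaving the parameter at its default of -1
 * also yields 8.
 */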

void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
				  uint32_t ucode_id)
{
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
	struct amdgpu_firmware_info *info = NULL;
	const struct firmware *ucode_fw;
	unsigned int fw_size;

	switch (ucode_id) {
	case AMDGPU_UCODE_ID_CP_PFP:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.pfp_fw->data;
		adev->gfx.pfp_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.pfp_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.pfp_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_PFP:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.pfp_fw->data;
		adev->gfx.pfp_fw_version =
			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.pfp_feature_version =
			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
		ucode_fw = adev->gfx.pfp_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.pfp_fw->data;
		ucode_fw = adev->gfx.pfp_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_ME:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.me_fw->data;
		adev->gfx.me_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.me_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.me_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_ME:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.me_fw->data;
		adev->gfx.me_fw_version =
			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.me_feature_version =
			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
		ucode_fw = adev->gfx.me_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.me_fw->data;
		ucode_fw = adev->gfx.me_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_CE:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.ce_fw->data;
		adev->gfx.ce_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.ce_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.ce_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_MEC1:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec_fw->data;
		adev->gfx.mec_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
			  le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_MEC1_JT:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec_fw->data;
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.mec2_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
			  le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2_JT:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		ucode_fw = adev->gfx.mec2_fw;
		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_RS64_MEC:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.mec_fw->data;
		adev->gfx.mec_fw_version =
			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.mec_feature_version =
			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.mec_fw->data;
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
		break;
	default:
		/* bail out for unknown ucode ids; falling through would use
		 * ucode_fw/fw_size uninitialized */
		dev_err(adev->dev, "Invalid ucode id %u\n", ucode_id);
		return;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[ucode_id];
		info->ucode_id = ucode_id;
		info->fw = ucode_fw;
		adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
	}
}