1 // SPDX-License-Identifier: GPL-2.0 OR MIT 2 /* 3 * Copyright 2014-2022 Advanced Micro Devices, Inc. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 * OTHER DEALINGS IN THE SOFTWARE. 22 */ 23 24 #include <linux/bsearch.h> 25 #include <linux/pci.h> 26 #include <linux/slab.h> 27 #include "kfd_priv.h" 28 #include "kfd_device_queue_manager.h" 29 #include "kfd_pm4_headers_vi.h" 30 #include "kfd_pm4_headers_aldebaran.h" 31 #include "cwsr_trap_handler.h" 32 #include "kfd_iommu.h" 33 #include "amdgpu_amdkfd.h" 34 #include "kfd_smi_events.h" 35 #include "kfd_migrate.h" 36 #include "amdgpu.h" 37 38 #define MQD_SIZE_ALIGNED 768 39 40 /* 41 * kfd_locked is used to lock the kfd driver during suspend or reset 42 * once locked, kfd driver will stop any further GPU execution. 43 * create process (open) will return -EAGAIN. 44 */ 45 static atomic_t kfd_locked = ATOMIC_INIT(0); 46 47 #ifdef CONFIG_DRM_AMDGPU_CIK 48 extern const struct kfd2kgd_calls gfx_v7_kfd2kgd; 49 #endif 50 extern const struct kfd2kgd_calls gfx_v8_kfd2kgd; 51 extern const struct kfd2kgd_calls gfx_v9_kfd2kgd; 52 extern const struct kfd2kgd_calls arcturus_kfd2kgd; 53 extern const struct kfd2kgd_calls aldebaran_kfd2kgd; 54 extern const struct kfd2kgd_calls gfx_v10_kfd2kgd; 55 extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd; 56 57 static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, 58 unsigned int chunk_size); 59 static void kfd_gtt_sa_fini(struct kfd_dev *kfd); 60 61 static int kfd_resume(struct kfd_dev *kfd); 62 63 static void kfd_device_info_set_sdma_queue_num(struct kfd_dev *kfd) 64 { 65 uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0]; 66 67 switch (sdma_version) { 68 case IP_VERSION(4, 0, 0):/* VEGA10 */ 69 case IP_VERSION(4, 0, 1):/* VEGA12 */ 70 case IP_VERSION(4, 1, 0):/* RAVEN */ 71 case IP_VERSION(4, 1, 1):/* RAVEN */ 72 case IP_VERSION(4, 1, 2):/* RENOIR */ 73 case IP_VERSION(5, 2, 1):/* VANGOGH */ 74 case IP_VERSION(5, 2, 3):/* YELLOW_CARP */ 75 kfd->device_info.num_sdma_queues_per_engine = 2; 76 break; 77 case IP_VERSION(4, 2, 0):/* VEGA20 */ 78 case IP_VERSION(4, 2, 2):/* ARCTURUS */ 79 case IP_VERSION(4, 4, 0):/* ALDEBARAN */ 80 case IP_VERSION(5, 0, 0):/* NAVI10 */ 81 case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */ 82 case IP_VERSION(5, 0, 2):/* NAVI14 */ 83 case IP_VERSION(5, 0, 5):/* NAVI12 */ 84 case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */ 85 case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */ 86 case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */ 87 case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */ 88 kfd->device_info.num_sdma_queues_per_engine = 8; 89 break; 90 default: 91 dev_warn(kfd_device, 92 "Default sdma queue per engine(8) is set due to mismatch of sdma ip block(SDMA_HWIP:0x%x).\n", 93 sdma_version); 94 kfd->device_info.num_sdma_queues_per_engine = 8; 95 } 96 } 97 98 static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd) 99 { 100 uint32_t gc_version = KFD_GC_VERSION(kfd); 101 102 switch (gc_version) { 103 case IP_VERSION(9, 0, 1): /* VEGA10 */ 104 case IP_VERSION(9, 1, 0): /* RAVEN */ 105 case IP_VERSION(9, 2, 1): /* VEGA12 */ 106 case IP_VERSION(9, 2, 2): /* RAVEN */ 107 case IP_VERSION(9, 3, 0): /* RENOIR */ 108 case IP_VERSION(9, 4, 0): /* VEGA20 */ 109 case IP_VERSION(9, 4, 1): /* ARCTURUS */ 110 case IP_VERSION(9, 4, 2): /* ALDEBARAN */ 111 case IP_VERSION(10, 3, 1): /* VANGOGH */ 112 case IP_VERSION(10, 3, 3): /* YELLOW_CARP */ 113 case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */ 114 case IP_VERSION(10, 1, 4): 115 case IP_VERSION(10, 1, 10): /* NAVI10 */ 116 case IP_VERSION(10, 1, 2): /* NAVI12 */ 117 case IP_VERSION(10, 1, 1): /* NAVI14 */ 118 case IP_VERSION(10, 3, 0): /* SIENNA_CICHLID */ 119 case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */ 120 case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */ 121 case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */ 122 kfd->device_info.event_interrupt_class = &event_interrupt_class_v9; 123 break; 124 default: 125 dev_warn(kfd_device, "v9 event interrupt handler is set due to " 126 "mismatch of gc ip block(GC_HWIP:0x%x).\n", gc_version); 127 kfd->device_info.event_interrupt_class = &event_interrupt_class_v9; 128 } 129 } 130 131 static void kfd_device_info_init(struct kfd_dev *kfd, 132 bool vf, uint32_t gfx_target_version) 133 { 134 uint32_t gc_version = KFD_GC_VERSION(kfd); 135 uint32_t asic_type = kfd->adev->asic_type; 136 137 kfd->device_info.max_pasid_bits = 16; 138 kfd->device_info.max_no_of_hqd = 24; 139 kfd->device_info.num_of_watch_points = 4; 140 kfd->device_info.mqd_size_aligned = MQD_SIZE_ALIGNED; 141 kfd->device_info.gfx_target_version = gfx_target_version; 142 143 if (KFD_IS_SOC15(kfd)) { 144 kfd->device_info.doorbell_size = 8; 145 kfd->device_info.ih_ring_entry_size = 8 * sizeof(uint32_t); 146 kfd->device_info.supports_cwsr = true; 147 148 kfd_device_info_set_sdma_queue_num(kfd); 149 150 kfd_device_info_set_event_interrupt_class(kfd); 151 152 /* Raven */ 153 if (gc_version == IP_VERSION(9, 1, 0) || 154 gc_version == IP_VERSION(9, 2, 2)) 155 kfd->device_info.needs_iommu_device = true; 156 157 if (gc_version < IP_VERSION(11, 0, 0)) { 158 /* Navi2x+, Navi1x+ */ 159 if (gc_version >= IP_VERSION(10, 3, 0)) 160 kfd->device_info.no_atomic_fw_version = 92; 161 else if (gc_version >= IP_VERSION(10, 1, 1)) 162 kfd->device_info.no_atomic_fw_version = 145; 163 164 /* Navi1x+ */ 165 if (gc_version >= IP_VERSION(10, 1, 1)) 166 kfd->device_info.needs_pci_atomics = true; 167 } 168 } else { 169 kfd->device_info.doorbell_size = 4; 170 kfd->device_info.ih_ring_entry_size = 4 * sizeof(uint32_t); 171 kfd->device_info.event_interrupt_class = &event_interrupt_class_cik; 172 kfd->device_info.num_sdma_queues_per_engine = 2; 173 174 if (asic_type != CHIP_KAVERI && 175 asic_type != CHIP_HAWAII && 176 asic_type != CHIP_TONGA) 177 kfd->device_info.supports_cwsr = true; 178 179 if (asic_type == CHIP_KAVERI || 180 asic_type == CHIP_CARRIZO) 181 kfd->device_info.needs_iommu_device = true; 182 183 if (asic_type != CHIP_HAWAII && !vf) 184 kfd->device_info.needs_pci_atomics = true; 185 } 186 } 187 188 struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) 189 { 190 struct kfd_dev *kfd = NULL; 191 const struct kfd2kgd_calls *f2g = NULL; 192 struct pci_dev *pdev = adev->pdev; 193 uint32_t gfx_target_version = 0; 194 195 switch (adev->asic_type) { 196 #ifdef KFD_SUPPORT_IOMMU_V2 197 #ifdef CONFIG_DRM_AMDGPU_CIK 198 case CHIP_KAVERI: 199 gfx_target_version = 70000; 200 if (!vf) 201 f2g = &gfx_v7_kfd2kgd; 202 break; 203 #endif 204 case CHIP_CARRIZO: 205 gfx_target_version = 80001; 206 if (!vf) 207 f2g = &gfx_v8_kfd2kgd; 208 break; 209 #endif 210 #ifdef CONFIG_DRM_AMDGPU_CIK 211 case CHIP_HAWAII: 212 gfx_target_version = 70001; 213 if (!amdgpu_exp_hw_support) 214 pr_info( 215 "KFD support on Hawaii is experimental. See modparam exp_hw_support\n" 216 ); 217 else if (!vf) 218 f2g = &gfx_v7_kfd2kgd; 219 break; 220 #endif 221 case CHIP_TONGA: 222 gfx_target_version = 80002; 223 if (!vf) 224 f2g = &gfx_v8_kfd2kgd; 225 break; 226 case CHIP_FIJI: 227 gfx_target_version = 80003; 228 f2g = &gfx_v8_kfd2kgd; 229 break; 230 case CHIP_POLARIS10: 231 gfx_target_version = 80003; 232 f2g = &gfx_v8_kfd2kgd; 233 break; 234 case CHIP_POLARIS11: 235 gfx_target_version = 80003; 236 if (!vf) 237 f2g = &gfx_v8_kfd2kgd; 238 break; 239 case CHIP_POLARIS12: 240 gfx_target_version = 80003; 241 if (!vf) 242 f2g = &gfx_v8_kfd2kgd; 243 break; 244 case CHIP_VEGAM: 245 gfx_target_version = 80003; 246 if (!vf) 247 f2g = &gfx_v8_kfd2kgd; 248 break; 249 default: 250 switch (adev->ip_versions[GC_HWIP][0]) { 251 /* Vega 10 */ 252 case IP_VERSION(9, 0, 1): 253 gfx_target_version = 90000; 254 f2g = &gfx_v9_kfd2kgd; 255 break; 256 #ifdef KFD_SUPPORT_IOMMU_V2 257 /* Raven */ 258 case IP_VERSION(9, 1, 0): 259 case IP_VERSION(9, 2, 2): 260 gfx_target_version = 90002; 261 if (!vf) 262 f2g = &gfx_v9_kfd2kgd; 263 break; 264 #endif 265 /* Vega12 */ 266 case IP_VERSION(9, 2, 1): 267 gfx_target_version = 90004; 268 if (!vf) 269 f2g = &gfx_v9_kfd2kgd; 270 break; 271 /* Renoir */ 272 case IP_VERSION(9, 3, 0): 273 gfx_target_version = 90012; 274 if (!vf) 275 f2g = &gfx_v9_kfd2kgd; 276 break; 277 /* Vega20 */ 278 case IP_VERSION(9, 4, 0): 279 gfx_target_version = 90006; 280 if (!vf) 281 f2g = &gfx_v9_kfd2kgd; 282 break; 283 /* Arcturus */ 284 case IP_VERSION(9, 4, 1): 285 gfx_target_version = 90008; 286 f2g = &arcturus_kfd2kgd; 287 break; 288 /* Aldebaran */ 289 case IP_VERSION(9, 4, 2): 290 gfx_target_version = 90010; 291 f2g = &aldebaran_kfd2kgd; 292 break; 293 /* Navi10 */ 294 case IP_VERSION(10, 1, 10): 295 gfx_target_version = 100100; 296 if (!vf) 297 f2g = &gfx_v10_kfd2kgd; 298 break; 299 /* Navi12 */ 300 case IP_VERSION(10, 1, 2): 301 gfx_target_version = 100101; 302 f2g = &gfx_v10_kfd2kgd; 303 break; 304 /* Navi14 */ 305 case IP_VERSION(10, 1, 1): 306 gfx_target_version = 100102; 307 if (!vf) 308 f2g = &gfx_v10_kfd2kgd; 309 break; 310 /* Cyan Skillfish */ 311 case IP_VERSION(10, 1, 3): 312 case IP_VERSION(10, 1, 4): 313 gfx_target_version = 100103; 314 if (!vf) 315 f2g = &gfx_v10_kfd2kgd; 316 break; 317 /* Sienna Cichlid */ 318 case IP_VERSION(10, 3, 0): 319 gfx_target_version = 100300; 320 f2g = &gfx_v10_3_kfd2kgd; 321 break; 322 /* Navy Flounder */ 323 case IP_VERSION(10, 3, 2): 324 gfx_target_version = 100301; 325 f2g = &gfx_v10_3_kfd2kgd; 326 break; 327 /* Van Gogh */ 328 case IP_VERSION(10, 3, 1): 329 gfx_target_version = 100303; 330 if (!vf) 331 f2g = &gfx_v10_3_kfd2kgd; 332 break; 333 /* Dimgrey Cavefish */ 334 case IP_VERSION(10, 3, 4): 335 gfx_target_version = 100302; 336 f2g = &gfx_v10_3_kfd2kgd; 337 break; 338 /* Beige Goby */ 339 case IP_VERSION(10, 3, 5): 340 gfx_target_version = 100304; 341 f2g = &gfx_v10_3_kfd2kgd; 342 break; 343 /* Yellow Carp */ 344 case IP_VERSION(10, 3, 3): 345 gfx_target_version = 100305; 346 if (!vf) 347 f2g = &gfx_v10_3_kfd2kgd; 348 break; 349 default: 350 break; 351 } 352 break; 353 } 354 355 if (!f2g) { 356 if (adev->ip_versions[GC_HWIP][0]) 357 dev_err(kfd_device, "GC IP %06x %s not supported in kfd\n", 358 adev->ip_versions[GC_HWIP][0], vf ? "VF" : ""); 359 else 360 dev_err(kfd_device, "%s %s not supported in kfd\n", 361 amdgpu_asic_name[adev->asic_type], vf ? "VF" : ""); 362 return NULL; 363 } 364 365 kfd = kzalloc(sizeof(*kfd), GFP_KERNEL); 366 if (!kfd) 367 return NULL; 368 369 kfd->adev = adev; 370 kfd_device_info_init(kfd, vf, gfx_target_version); 371 kfd->pdev = pdev; 372 kfd->init_complete = false; 373 kfd->kfd2kgd = f2g; 374 atomic_set(&kfd->compute_profile, 0); 375 376 mutex_init(&kfd->doorbell_mutex); 377 memset(&kfd->doorbell_available_index, 0, 378 sizeof(kfd->doorbell_available_index)); 379 380 atomic_set(&kfd->sram_ecc_flag, 0); 381 382 ida_init(&kfd->doorbell_ida); 383 384 return kfd; 385 } 386 387 static void kfd_cwsr_init(struct kfd_dev *kfd) 388 { 389 if (cwsr_enable && kfd->device_info.supports_cwsr) { 390 if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) { 391 BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE); 392 kfd->cwsr_isa = cwsr_trap_gfx8_hex; 393 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex); 394 } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) { 395 BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE); 396 kfd->cwsr_isa = cwsr_trap_arcturus_hex; 397 kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex); 398 } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) { 399 BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) > PAGE_SIZE); 400 kfd->cwsr_isa = cwsr_trap_aldebaran_hex; 401 kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex); 402 } else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) { 403 BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE); 404 kfd->cwsr_isa = cwsr_trap_gfx9_hex; 405 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex); 406 } else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) { 407 BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE); 408 kfd->cwsr_isa = cwsr_trap_nv1x_hex; 409 kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex); 410 } else { 411 BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex) > PAGE_SIZE); 412 kfd->cwsr_isa = cwsr_trap_gfx10_hex; 413 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex); 414 } 415 416 kfd->cwsr_enabled = true; 417 } 418 } 419 420 static int kfd_gws_init(struct kfd_dev *kfd) 421 { 422 int ret = 0; 423 424 if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) 425 return 0; 426 427 if (hws_gws_support || (KFD_IS_SOC15(kfd) && 428 ((KFD_GC_VERSION(kfd) == IP_VERSION(9, 0, 1) 429 && kfd->mec2_fw_version >= 0x81b3) || 430 (KFD_GC_VERSION(kfd) <= IP_VERSION(9, 4, 0) 431 && kfd->mec2_fw_version >= 0x1b3) || 432 (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1) 433 && kfd->mec2_fw_version >= 0x30) || 434 (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) 435 && kfd->mec2_fw_version >= 0x28)))) 436 ret = amdgpu_amdkfd_alloc_gws(kfd->adev, 437 kfd->adev->gds.gws_size, &kfd->gws); 438 439 return ret; 440 } 441 442 static void kfd_smi_init(struct kfd_dev *dev) 443 { 444 INIT_LIST_HEAD(&dev->smi_clients); 445 spin_lock_init(&dev->smi_lock); 446 } 447 448 bool kgd2kfd_device_init(struct kfd_dev *kfd, 449 struct drm_device *ddev, 450 const struct kgd2kfd_shared_resources *gpu_resources) 451 { 452 unsigned int size, map_process_packet_size; 453 454 kfd->ddev = ddev; 455 kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, 456 KGD_ENGINE_MEC1); 457 kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, 458 KGD_ENGINE_MEC2); 459 kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, 460 KGD_ENGINE_SDMA1); 461 kfd->shared_resources = *gpu_resources; 462 463 kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1; 464 kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1; 465 kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd 466 - kfd->vm_info.first_vmid_kfd + 1; 467 468 /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps. 469 * 32 and 64-bit requests are possible and must be 470 * supported. 471 */ 472 kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev); 473 if (!kfd->pci_atomic_requested && 474 kfd->device_info.needs_pci_atomics && 475 (!kfd->device_info.no_atomic_fw_version || 476 kfd->mec_fw_version < kfd->device_info.no_atomic_fw_version)) { 477 dev_info(kfd_device, 478 "skipped device %x:%x, PCI rejects atomics %d<%d\n", 479 kfd->pdev->vendor, kfd->pdev->device, 480 kfd->mec_fw_version, 481 kfd->device_info.no_atomic_fw_version); 482 return false; 483 } 484 485 /* Verify module parameters regarding mapped process number*/ 486 if (hws_max_conc_proc >= 0) 487 kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd); 488 else 489 kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd; 490 491 /* calculate max size of mqds needed for queues */ 492 size = max_num_of_queues_per_device * 493 kfd->device_info.mqd_size_aligned; 494 495 /* 496 * calculate max size of runlist packet. 497 * There can be only 2 packets at once 498 */ 499 map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ? 500 sizeof(struct pm4_mes_map_process_aldebaran) : 501 sizeof(struct pm4_mes_map_process); 502 size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size + 503 max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues) 504 + sizeof(struct pm4_mes_runlist)) * 2; 505 506 /* Add size of HIQ & DIQ */ 507 size += KFD_KERNEL_QUEUE_SIZE * 2; 508 509 /* add another 512KB for all other allocations on gart (HPD, fences) */ 510 size += 512 * 1024; 511 512 if (amdgpu_amdkfd_alloc_gtt_mem( 513 kfd->adev, size, &kfd->gtt_mem, 514 &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr, 515 false)) { 516 dev_err(kfd_device, "Could not allocate %d bytes\n", size); 517 goto alloc_gtt_mem_failure; 518 } 519 520 dev_info(kfd_device, "Allocated %d bytes on gart\n", size); 521 522 /* Initialize GTT sa with 512 byte chunk size */ 523 if (kfd_gtt_sa_init(kfd, size, 512) != 0) { 524 dev_err(kfd_device, "Error initializing gtt sub-allocator\n"); 525 goto kfd_gtt_sa_init_error; 526 } 527 528 if (kfd_doorbell_init(kfd)) { 529 dev_err(kfd_device, 530 "Error initializing doorbell aperture\n"); 531 goto kfd_doorbell_error; 532 } 533 534 if (amdgpu_use_xgmi_p2p) 535 kfd->hive_id = kfd->adev->gmc.xgmi.hive_id; 536 537 kfd->noretry = kfd->adev->gmc.noretry; 538 539 if (kfd_interrupt_init(kfd)) { 540 dev_err(kfd_device, "Error initializing interrupts\n"); 541 goto kfd_interrupt_error; 542 } 543 544 kfd->dqm = device_queue_manager_init(kfd); 545 if (!kfd->dqm) { 546 dev_err(kfd_device, "Error initializing queue manager\n"); 547 goto device_queue_manager_error; 548 } 549 550 /* If supported on this device, allocate global GWS that is shared 551 * by all KFD processes 552 */ 553 if (kfd_gws_init(kfd)) { 554 dev_err(kfd_device, "Could not allocate %d gws\n", 555 kfd->adev->gds.gws_size); 556 goto gws_error; 557 } 558 559 /* If CRAT is broken, won't set iommu enabled */ 560 kfd_double_confirm_iommu_support(kfd); 561 562 if (kfd_iommu_device_init(kfd)) { 563 kfd->use_iommu_v2 = false; 564 dev_err(kfd_device, "Error initializing iommuv2\n"); 565 goto device_iommu_error; 566 } 567 568 kfd_cwsr_init(kfd); 569 570 svm_migrate_init(kfd->adev); 571 572 if (kgd2kfd_resume_iommu(kfd)) 573 goto device_iommu_error; 574 575 if (kfd_resume(kfd)) 576 goto kfd_resume_error; 577 578 if (kfd_topology_add_device(kfd)) { 579 dev_err(kfd_device, "Error adding device to topology\n"); 580 goto kfd_topology_add_device_error; 581 } 582 583 kfd_smi_init(kfd); 584 585 kfd->init_complete = true; 586 dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor, 587 kfd->pdev->device); 588 589 pr_debug("Starting kfd with the following scheduling policy %d\n", 590 kfd->dqm->sched_policy); 591 592 goto out; 593 594 kfd_topology_add_device_error: 595 kfd_resume_error: 596 device_iommu_error: 597 gws_error: 598 device_queue_manager_uninit(kfd->dqm); 599 device_queue_manager_error: 600 kfd_interrupt_exit(kfd); 601 kfd_interrupt_error: 602 kfd_doorbell_fini(kfd); 603 kfd_doorbell_error: 604 kfd_gtt_sa_fini(kfd); 605 kfd_gtt_sa_init_error: 606 amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); 607 alloc_gtt_mem_failure: 608 if (kfd->gws) 609 amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws); 610 dev_err(kfd_device, 611 "device %x:%x NOT added due to errors\n", 612 kfd->pdev->vendor, kfd->pdev->device); 613 out: 614 return kfd->init_complete; 615 } 616 617 void kgd2kfd_device_exit(struct kfd_dev *kfd) 618 { 619 if (kfd->init_complete) { 620 device_queue_manager_uninit(kfd->dqm); 621 kfd_interrupt_exit(kfd); 622 kfd_topology_remove_device(kfd); 623 kfd_doorbell_fini(kfd); 624 ida_destroy(&kfd->doorbell_ida); 625 kfd_gtt_sa_fini(kfd); 626 amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); 627 if (kfd->gws) 628 amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws); 629 } 630 631 kfree(kfd); 632 } 633 634 int kgd2kfd_pre_reset(struct kfd_dev *kfd) 635 { 636 if (!kfd->init_complete) 637 return 0; 638 639 kfd_smi_event_update_gpu_reset(kfd, false); 640 641 kfd->dqm->ops.pre_reset(kfd->dqm); 642 643 kgd2kfd_suspend(kfd, false); 644 645 kfd_signal_reset_event(kfd); 646 return 0; 647 } 648 649 /* 650 * Fix me. KFD won't be able to resume existing process for now. 651 * We will keep all existing process in a evicted state and 652 * wait the process to be terminated. 653 */ 654 655 int kgd2kfd_post_reset(struct kfd_dev *kfd) 656 { 657 int ret; 658 659 if (!kfd->init_complete) 660 return 0; 661 662 ret = kfd_resume(kfd); 663 if (ret) 664 return ret; 665 atomic_dec(&kfd_locked); 666 667 atomic_set(&kfd->sram_ecc_flag, 0); 668 669 kfd_smi_event_update_gpu_reset(kfd, true); 670 671 return 0; 672 } 673 674 bool kfd_is_locked(void) 675 { 676 return (atomic_read(&kfd_locked) > 0); 677 } 678 679 void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) 680 { 681 if (!kfd->init_complete) 682 return; 683 684 /* for runtime suspend, skip locking kfd */ 685 if (!run_pm) { 686 /* For first KFD device suspend all the KFD processes */ 687 if (atomic_inc_return(&kfd_locked) == 1) 688 kfd_suspend_all_processes(); 689 } 690 691 kfd->dqm->ops.stop(kfd->dqm); 692 kfd_iommu_suspend(kfd); 693 } 694 695 int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) 696 { 697 int ret, count; 698 699 if (!kfd->init_complete) 700 return 0; 701 702 ret = kfd_resume(kfd); 703 if (ret) 704 return ret; 705 706 /* for runtime resume, skip unlocking kfd */ 707 if (!run_pm) { 708 count = atomic_dec_return(&kfd_locked); 709 WARN_ONCE(count < 0, "KFD suspend / resume ref. error"); 710 if (count == 0) 711 ret = kfd_resume_all_processes(); 712 } 713 714 return ret; 715 } 716 717 int kgd2kfd_resume_iommu(struct kfd_dev *kfd) 718 { 719 int err = 0; 720 721 err = kfd_iommu_resume(kfd); 722 if (err) 723 dev_err(kfd_device, 724 "Failed to resume IOMMU for device %x:%x\n", 725 kfd->pdev->vendor, kfd->pdev->device); 726 return err; 727 } 728 729 static int kfd_resume(struct kfd_dev *kfd) 730 { 731 int err = 0; 732 733 err = kfd->dqm->ops.start(kfd->dqm); 734 if (err) 735 dev_err(kfd_device, 736 "Error starting queue manager for device %x:%x\n", 737 kfd->pdev->vendor, kfd->pdev->device); 738 739 return err; 740 } 741 742 static inline void kfd_queue_work(struct workqueue_struct *wq, 743 struct work_struct *work) 744 { 745 int cpu, new_cpu; 746 747 cpu = new_cpu = smp_processor_id(); 748 do { 749 new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids; 750 if (cpu_to_node(new_cpu) == numa_node_id()) 751 break; 752 } while (cpu != new_cpu); 753 754 queue_work_on(new_cpu, wq, work); 755 } 756 757 /* This is called directly from KGD at ISR. */ 758 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) 759 { 760 uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE]; 761 bool is_patched = false; 762 unsigned long flags; 763 764 if (!kfd->init_complete) 765 return; 766 767 if (kfd->device_info.ih_ring_entry_size > sizeof(patched_ihre)) { 768 dev_err_once(kfd_device, "Ring entry too small\n"); 769 return; 770 } 771 772 spin_lock_irqsave(&kfd->interrupt_lock, flags); 773 774 if (kfd->interrupts_active 775 && interrupt_is_wanted(kfd, ih_ring_entry, 776 patched_ihre, &is_patched) 777 && enqueue_ih_ring_entry(kfd, 778 is_patched ? patched_ihre : ih_ring_entry)) 779 kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work); 780 781 spin_unlock_irqrestore(&kfd->interrupt_lock, flags); 782 } 783 784 int kgd2kfd_quiesce_mm(struct mm_struct *mm) 785 { 786 struct kfd_process *p; 787 int r; 788 789 /* Because we are called from arbitrary context (workqueue) as opposed 790 * to process context, kfd_process could attempt to exit while we are 791 * running so the lookup function increments the process ref count. 792 */ 793 p = kfd_lookup_process_by_mm(mm); 794 if (!p) 795 return -ESRCH; 796 797 WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid); 798 r = kfd_process_evict_queues(p); 799 800 kfd_unref_process(p); 801 return r; 802 } 803 804 int kgd2kfd_resume_mm(struct mm_struct *mm) 805 { 806 struct kfd_process *p; 807 int r; 808 809 /* Because we are called from arbitrary context (workqueue) as opposed 810 * to process context, kfd_process could attempt to exit while we are 811 * running so the lookup function increments the process ref count. 812 */ 813 p = kfd_lookup_process_by_mm(mm); 814 if (!p) 815 return -ESRCH; 816 817 r = kfd_process_restore_queues(p); 818 819 kfd_unref_process(p); 820 return r; 821 } 822 823 /** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will 824 * prepare for safe eviction of KFD BOs that belong to the specified 825 * process. 826 * 827 * @mm: mm_struct that identifies the specified KFD process 828 * @fence: eviction fence attached to KFD process BOs 829 * 830 */ 831 int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm, 832 struct dma_fence *fence) 833 { 834 struct kfd_process *p; 835 unsigned long active_time; 836 unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS); 837 838 if (!fence) 839 return -EINVAL; 840 841 if (dma_fence_is_signaled(fence)) 842 return 0; 843 844 p = kfd_lookup_process_by_mm(mm); 845 if (!p) 846 return -ENODEV; 847 848 if (fence->seqno == p->last_eviction_seqno) 849 goto out; 850 851 p->last_eviction_seqno = fence->seqno; 852 853 /* Avoid KFD process starvation. Wait for at least 854 * PROCESS_ACTIVE_TIME_MS before evicting the process again 855 */ 856 active_time = get_jiffies_64() - p->last_restore_timestamp; 857 if (delay_jiffies > active_time) 858 delay_jiffies -= active_time; 859 else 860 delay_jiffies = 0; 861 862 /* During process initialization eviction_work.dwork is initialized 863 * to kfd_evict_bo_worker 864 */ 865 WARN(debug_evictions, "Scheduling eviction of pid %d in %ld jiffies", 866 p->lead_thread->pid, delay_jiffies); 867 schedule_delayed_work(&p->eviction_work, delay_jiffies); 868 out: 869 kfd_unref_process(p); 870 return 0; 871 } 872 873 static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, 874 unsigned int chunk_size) 875 { 876 unsigned int num_of_longs; 877 878 if (WARN_ON(buf_size < chunk_size)) 879 return -EINVAL; 880 if (WARN_ON(buf_size == 0)) 881 return -EINVAL; 882 if (WARN_ON(chunk_size == 0)) 883 return -EINVAL; 884 885 kfd->gtt_sa_chunk_size = chunk_size; 886 kfd->gtt_sa_num_of_chunks = buf_size / chunk_size; 887 888 num_of_longs = (kfd->gtt_sa_num_of_chunks + BITS_PER_LONG - 1) / 889 BITS_PER_LONG; 890 891 kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL); 892 893 if (!kfd->gtt_sa_bitmap) 894 return -ENOMEM; 895 896 pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n", 897 kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap); 898 899 mutex_init(&kfd->gtt_sa_lock); 900 901 return 0; 902 903 } 904 905 static void kfd_gtt_sa_fini(struct kfd_dev *kfd) 906 { 907 mutex_destroy(&kfd->gtt_sa_lock); 908 kfree(kfd->gtt_sa_bitmap); 909 } 910 911 static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr, 912 unsigned int bit_num, 913 unsigned int chunk_size) 914 { 915 return start_addr + bit_num * chunk_size; 916 } 917 918 static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr, 919 unsigned int bit_num, 920 unsigned int chunk_size) 921 { 922 return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size); 923 } 924 925 int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size, 926 struct kfd_mem_obj **mem_obj) 927 { 928 unsigned int found, start_search, cur_size; 929 930 if (size == 0) 931 return -EINVAL; 932 933 if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size) 934 return -ENOMEM; 935 936 *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL); 937 if (!(*mem_obj)) 938 return -ENOMEM; 939 940 pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size); 941 942 start_search = 0; 943 944 mutex_lock(&kfd->gtt_sa_lock); 945 946 kfd_gtt_restart_search: 947 /* Find the first chunk that is free */ 948 found = find_next_zero_bit(kfd->gtt_sa_bitmap, 949 kfd->gtt_sa_num_of_chunks, 950 start_search); 951 952 pr_debug("Found = %d\n", found); 953 954 /* If there wasn't any free chunk, bail out */ 955 if (found == kfd->gtt_sa_num_of_chunks) 956 goto kfd_gtt_no_free_chunk; 957 958 /* Update fields of mem_obj */ 959 (*mem_obj)->range_start = found; 960 (*mem_obj)->range_end = found; 961 (*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr( 962 kfd->gtt_start_gpu_addr, 963 found, 964 kfd->gtt_sa_chunk_size); 965 (*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr( 966 kfd->gtt_start_cpu_ptr, 967 found, 968 kfd->gtt_sa_chunk_size); 969 970 pr_debug("gpu_addr = %p, cpu_addr = %p\n", 971 (uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr); 972 973 /* If we need only one chunk, mark it as allocated and get out */ 974 if (size <= kfd->gtt_sa_chunk_size) { 975 pr_debug("Single bit\n"); 976 set_bit(found, kfd->gtt_sa_bitmap); 977 goto kfd_gtt_out; 978 } 979 980 /* Otherwise, try to see if we have enough contiguous chunks */ 981 cur_size = size - kfd->gtt_sa_chunk_size; 982 do { 983 (*mem_obj)->range_end = 984 find_next_zero_bit(kfd->gtt_sa_bitmap, 985 kfd->gtt_sa_num_of_chunks, ++found); 986 /* 987 * If next free chunk is not contiguous than we need to 988 * restart our search from the last free chunk we found (which 989 * wasn't contiguous to the previous ones 990 */ 991 if ((*mem_obj)->range_end != found) { 992 start_search = found; 993 goto kfd_gtt_restart_search; 994 } 995 996 /* 997 * If we reached end of buffer, bail out with error 998 */ 999 if (found == kfd->gtt_sa_num_of_chunks) 1000 goto kfd_gtt_no_free_chunk; 1001 1002 /* Check if we don't need another chunk */ 1003 if (cur_size <= kfd->gtt_sa_chunk_size) 1004 cur_size = 0; 1005 else 1006 cur_size -= kfd->gtt_sa_chunk_size; 1007 1008 } while (cur_size > 0); 1009 1010 pr_debug("range_start = %d, range_end = %d\n", 1011 (*mem_obj)->range_start, (*mem_obj)->range_end); 1012 1013 /* Mark the chunks as allocated */ 1014 for (found = (*mem_obj)->range_start; 1015 found <= (*mem_obj)->range_end; 1016 found++) 1017 set_bit(found, kfd->gtt_sa_bitmap); 1018 1019 kfd_gtt_out: 1020 mutex_unlock(&kfd->gtt_sa_lock); 1021 return 0; 1022 1023 kfd_gtt_no_free_chunk: 1024 pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj); 1025 mutex_unlock(&kfd->gtt_sa_lock); 1026 kfree(*mem_obj); 1027 return -ENOMEM; 1028 } 1029 1030 int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj) 1031 { 1032 unsigned int bit; 1033 1034 /* Act like kfree when trying to free a NULL object */ 1035 if (!mem_obj) 1036 return 0; 1037 1038 pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n", 1039 mem_obj, mem_obj->range_start, mem_obj->range_end); 1040 1041 mutex_lock(&kfd->gtt_sa_lock); 1042 1043 /* Mark the chunks as free */ 1044 for (bit = mem_obj->range_start; 1045 bit <= mem_obj->range_end; 1046 bit++) 1047 clear_bit(bit, kfd->gtt_sa_bitmap); 1048 1049 mutex_unlock(&kfd->gtt_sa_lock); 1050 1051 kfree(mem_obj); 1052 return 0; 1053 } 1054 1055 void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd) 1056 { 1057 if (kfd) 1058 atomic_inc(&kfd->sram_ecc_flag); 1059 } 1060 1061 void kfd_inc_compute_active(struct kfd_dev *kfd) 1062 { 1063 if (atomic_inc_return(&kfd->compute_profile) == 1) 1064 amdgpu_amdkfd_set_compute_idle(kfd->adev, false); 1065 } 1066 1067 void kfd_dec_compute_active(struct kfd_dev *kfd) 1068 { 1069 int count = atomic_dec_return(&kfd->compute_profile); 1070 1071 if (count == 0) 1072 amdgpu_amdkfd_set_compute_idle(kfd->adev, true); 1073 WARN_ONCE(count < 0, "Compute profile ref. count error"); 1074 } 1075 1076 void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) 1077 { 1078 if (kfd && kfd->init_complete) 1079 kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask); 1080 } 1081 1082 /* kfd_get_num_sdma_engines returns the number of PCIe optimized SDMA and 1083 * kfd_get_num_xgmi_sdma_engines returns the number of XGMI SDMA. 1084 * When the device has more than two engines, we reserve two for PCIe to enable 1085 * full-duplex and the rest are used as XGMI. 1086 */ 1087 unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev) 1088 { 1089 /* If XGMI is not supported, all SDMA engines are PCIe */ 1090 if (!kdev->adev->gmc.xgmi.supported) 1091 return kdev->adev->sdma.num_instances; 1092 1093 return min(kdev->adev->sdma.num_instances, 2); 1094 } 1095 1096 unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev) 1097 { 1098 /* After reserved for PCIe, the rest of engines are XGMI */ 1099 return kdev->adev->sdma.num_instances - kfd_get_num_sdma_engines(kdev); 1100 } 1101 1102 #if defined(CONFIG_DEBUG_FS) 1103 1104 /* This function will send a package to HIQ to hang the HWS 1105 * which will trigger a GPU reset and bring the HWS back to normal state 1106 */ 1107 int kfd_debugfs_hang_hws(struct kfd_dev *dev) 1108 { 1109 if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) { 1110 pr_err("HWS is not enabled"); 1111 return -EINVAL; 1112 } 1113 1114 return dqm_debugfs_hang_hws(dev->dqm); 1115 } 1116 1117 #endif 1118