// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_headers_aldebaran.h"
#include "cwsr_trap_handler.h"
#include "kfd_iommu.h"
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
#include "kfd_migrate.h"
#include "amdgpu.h"

#define MQD_SIZE_ALIGNED 768

/*
 * kfd_locked is used to lock the kfd driver during suspend or reset
 * once locked, kfd driver will stop any further GPU execution.
 * create process (open) will return -EAGAIN.
 */
static atomic_t kfd_locked = ATOMIC_INIT(0);

#ifdef CONFIG_DRM_AMDGPU_CIK
extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
#endif
extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
extern const struct kfd2kgd_calls arcturus_kfd2kgd;
extern const struct kfd2kgd_calls aldebaran_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v11_kfd2kgd;

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);

static int kfd_resume(struct kfd_dev *kfd);

static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
{
	uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0];

	switch (sdma_version) {
	case IP_VERSION(4, 0, 0):/* VEGA10 */
	case IP_VERSION(4, 0, 1):/* VEGA12 */
	case IP_VERSION(4, 1, 0):/* RAVEN */
	case IP_VERSION(4, 1, 1):/* RAVEN */
	case IP_VERSION(4, 1, 2):/* RENOIR */
	case IP_VERSION(5, 2, 1):/* VANGOGH */
	case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
	case IP_VERSION(6, 0, 1):
		kfd->device_info.num_sdma_queues_per_engine = 2;
		break;
	case IP_VERSION(4, 2, 0):/* VEGA20 */
	case IP_VERSION(4, 2, 2):/* ARCTURUS */
	case IP_VERSION(4, 4, 0):/* ALDEBARAN */
	case IP_VERSION(5, 0, 0):/* NAVI10 */
	case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
	case IP_VERSION(5, 0, 2):/* NAVI14 */
	case IP_VERSION(5, 0, 5):/* NAVI12 */
	case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */
	case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */
	case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */
	case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 2):
		kfd->device_info.num_sdma_queues_per_engine = 8;
		break;
	default:
		dev_warn(kfd_device,
			 "Default sdma queue per engine(8) is set due to mismatch of sdma ip block(SDMA_HWIP:0x%x).\n",
			 sdma_version);
		kfd->device_info.num_sdma_queues_per_engine = 8;
	}

	switch (sdma_version) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
		/* Reserve 1 for paging and 1 for gfx */
		kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
		/* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
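		/* In this interleaved layout, the 0xF mask below (bits 0-3)
		 * covers queue 0 and queue 1 on both engine 0 and engine 1,
		 * i.e. the two reserved queues per engine set above.
		 */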
		kfd->device_info.reserved_sdma_queues_bitmap = 0xFULL;
		break;
	default:
		break;
	}
}

static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
{
	uint32_t gc_version = KFD_GC_VERSION(kfd);

	switch (gc_version) {
	case IP_VERSION(9, 0, 1): /* VEGA10 */
	case IP_VERSION(9, 1, 0): /* RAVEN */
	case IP_VERSION(9, 2, 1): /* VEGA12 */
	case IP_VERSION(9, 2, 2): /* RAVEN */
	case IP_VERSION(9, 3, 0): /* RENOIR */
	case IP_VERSION(9, 4, 0): /* VEGA20 */
	case IP_VERSION(9, 4, 1): /* ARCTURUS */
	case IP_VERSION(9, 4, 2): /* ALDEBARAN */
	case IP_VERSION(10, 3, 1): /* VANGOGH */
	case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
	case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 1, 10): /* NAVI10 */
	case IP_VERSION(10, 1, 2): /* NAVI12 */
	case IP_VERSION(10, 1, 1): /* NAVI14 */
	case IP_VERSION(10, 3, 0): /* SIENNA_CICHLID */
	case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */
	case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */
	case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
		break;
	default:
		dev_warn(kfd_device, "v9 event interrupt handler is set due to "
			"mismatch of gc ip block(GC_HWIP:0x%x).\n", gc_version);
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
	}
}

static void kfd_device_info_init(struct kfd_dev *kfd,
				 bool vf, uint32_t gfx_target_version)
{
	uint32_t gc_version = KFD_GC_VERSION(kfd);
	uint32_t asic_type = kfd->adev->asic_type;

	kfd->device_info.max_pasid_bits = 16;
	kfd->device_info.max_no_of_hqd = 24;
	kfd->device_info.num_of_watch_points = 4;
	kfd->device_info.mqd_size_aligned = MQD_SIZE_ALIGNED;
	kfd->device_info.gfx_target_version = gfx_target_version;

	if (KFD_IS_SOC15(kfd)) {
		kfd->device_info.doorbell_size = 8;
		kfd->device_info.ih_ring_entry_size = 8 * sizeof(uint32_t);
		kfd->device_info.supports_cwsr = true;

		kfd_device_info_set_sdma_info(kfd);

		kfd_device_info_set_event_interrupt_class(kfd);

		/* Raven */
		if (gc_version == IP_VERSION(9, 1, 0) ||
		    gc_version == IP_VERSION(9, 2, 2))
			kfd->device_info.needs_iommu_device = true;

		if (gc_version < IP_VERSION(11, 0, 0)) {
			/* Navi2x+, Navi1x+ */
			if (gc_version >= IP_VERSION(10, 3, 0))
				kfd->device_info.no_atomic_fw_version = 92;
			else if (gc_version >= IP_VERSION(10, 1, 1))
				kfd->device_info.no_atomic_fw_version = 145;

			/* Navi1x+ */
			if (gc_version >= IP_VERSION(10, 1, 1))
				kfd->device_info.needs_pci_atomics = true;
		}
	} else {
		kfd->device_info.doorbell_size = 4;
		kfd->device_info.ih_ring_entry_size = 4 * sizeof(uint32_t);
		kfd->device_info.event_interrupt_class = &event_interrupt_class_cik;
		kfd->device_info.num_sdma_queues_per_engine = 2;

		if (asic_type != CHIP_KAVERI &&
		    asic_type != CHIP_HAWAII &&
		    asic_type != CHIP_TONGA)
			kfd->device_info.supports_cwsr = true;

		if (asic_type == CHIP_KAVERI ||
		    asic_type == CHIP_CARRIZO)
			kfd->device_info.needs_iommu_device = true;

		if (asic_type != CHIP_HAWAII && !vf)
			kfd->device_info.needs_pci_atomics = true;
	}
}
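
/*
 * kgd2kfd_probe - create and minimally initialize a kfd_dev for @adev.
 *
 * Picks the gfx_target_version and the kfd2kgd callback table that match the
 * ASIC (or its GC IP version), then allocates the kfd_dev and fills in the
 * basic device_info. Returns NULL when the device (or the VF variant of it)
 * is not supported by KFD.
 */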
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
{
	struct kfd_dev *kfd = NULL;
	const struct kfd2kgd_calls *f2g = NULL;
	struct pci_dev *pdev = adev->pdev;
	uint32_t gfx_target_version = 0;

	switch (adev->asic_type) {
#ifdef KFD_SUPPORT_IOMMU_V2
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
		gfx_target_version = 70000;
		if (!vf)
			f2g = &gfx_v7_kfd2kgd;
		break;
#endif
	case CHIP_CARRIZO:
		gfx_target_version = 80001;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_HAWAII:
		gfx_target_version = 70001;
		if (!amdgpu_exp_hw_support)
			pr_info(
	"KFD support on Hawaii is experimental. See modparam exp_hw_support\n"
				);
		else if (!vf)
			f2g = &gfx_v7_kfd2kgd;
		break;
#endif
	case CHIP_TONGA:
		gfx_target_version = 80002;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_FIJI:
		gfx_target_version = 80003;
		f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_POLARIS10:
		gfx_target_version = 80003;
		f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_POLARIS11:
		gfx_target_version = 80003;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_POLARIS12:
		gfx_target_version = 80003;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_VEGAM:
		gfx_target_version = 80003;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
	default:
		switch (adev->ip_versions[GC_HWIP][0]) {
		/* Vega 10 */
		case IP_VERSION(9, 0, 1):
			gfx_target_version = 90000;
			f2g = &gfx_v9_kfd2kgd;
			break;
#ifdef KFD_SUPPORT_IOMMU_V2
		/* Raven */
		case IP_VERSION(9, 1, 0):
		case IP_VERSION(9, 2, 2):
			gfx_target_version = 90002;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
#endif
		/* Vega12 */
		case IP_VERSION(9, 2, 1):
			gfx_target_version = 90004;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
		/* Renoir */
		case IP_VERSION(9, 3, 0):
			gfx_target_version = 90012;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
		/* Vega20 */
		case IP_VERSION(9, 4, 0):
			gfx_target_version = 90006;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
		/* Arcturus */
		case IP_VERSION(9, 4, 1):
			gfx_target_version = 90008;
			f2g = &arcturus_kfd2kgd;
			break;
		/* Aldebaran */
		case IP_VERSION(9, 4, 2):
			gfx_target_version = 90010;
			f2g = &aldebaran_kfd2kgd;
			break;
		/* Navi10 */
		case IP_VERSION(10, 1, 10):
			gfx_target_version = 100100;
			if (!vf)
				f2g = &gfx_v10_kfd2kgd;
			break;
		/* Navi12 */
		case IP_VERSION(10, 1, 2):
			gfx_target_version = 100101;
			f2g = &gfx_v10_kfd2kgd;
			break;
		/* Navi14 */
		case IP_VERSION(10, 1, 1):
			gfx_target_version = 100102;
			if (!vf)
				f2g = &gfx_v10_kfd2kgd;
			break;
		/* Cyan Skillfish */
		case IP_VERSION(10, 1, 3):
		case IP_VERSION(10, 1, 4):
			gfx_target_version = 100103;
			if (!vf)
				f2g = &gfx_v10_kfd2kgd;
			break;
		/* Sienna Cichlid */
		case IP_VERSION(10, 3, 0):
			gfx_target_version = 100300;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Navy Flounder */
		case IP_VERSION(10, 3, 2):
			gfx_target_version = 100301;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Van Gogh */
		case IP_VERSION(10, 3, 1):
			gfx_target_version = 100303;
			if (!vf)
				f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Dimgrey Cavefish */
		case IP_VERSION(10, 3, 4):
			gfx_target_version = 100302;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Beige Goby */
		case IP_VERSION(10, 3, 5):
			gfx_target_version = 100304;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Yellow Carp */
		case IP_VERSION(10, 3, 3):
			gfx_target_version = 100305;
			if (!vf)
				f2g = &gfx_v10_3_kfd2kgd;
			break;
		case IP_VERSION(11, 0, 0):
			gfx_target_version = 110000;
			f2g = &gfx_v11_kfd2kgd;
			break;
		case IP_VERSION(11, 0, 1):
			gfx_target_version = 110003;
			f2g = &gfx_v11_kfd2kgd;
			break;
		case IP_VERSION(11, 0, 2):
			gfx_target_version = 110002;
			f2g = &gfx_v11_kfd2kgd;
			break;
		default:
			break;
		}
		break;
	}

	if (!f2g) {
		if (adev->ip_versions[GC_HWIP][0])
			dev_err(kfd_device, "GC IP %06x %s not supported in kfd\n",
				adev->ip_versions[GC_HWIP][0], vf ? "VF" : "");
		else
			dev_err(kfd_device, "%s %s not supported in kfd\n",
				amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
		return NULL;
	}

	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
	if (!kfd)
		return NULL;

	kfd->adev = adev;
	kfd_device_info_init(kfd, vf, gfx_target_version);
	kfd->pdev = pdev;
	kfd->init_complete = false;
	kfd->kfd2kgd = f2g;
	atomic_set(&kfd->compute_profile, 0);

	mutex_init(&kfd->doorbell_mutex);
	memset(&kfd->doorbell_available_index, 0,
		sizeof(kfd->doorbell_available_index));

	atomic_set(&kfd->sram_ecc_flag, 0);

	ida_init(&kfd->doorbell_ida);

	return kfd;
}

static void kfd_cwsr_init(struct kfd_dev *kfd)
{
	if (cwsr_enable && kfd->device_info.supports_cwsr) {
		if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx8_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_arcturus_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_aldebaran_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex);
		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx9_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_nv1x_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx10_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex);
		} else {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx11_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx11_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex);
		}

		kfd->cwsr_enabled = true;
	}
}
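
/*
 * Allocate the global GWS pool only when the HW scheduler is in use and the
 * MEC2 firmware on this GC version is known to support GWS (or when support
 * is forced via the hws_gws_support module parameter).
 */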
static int kfd_gws_init(struct kfd_dev *kfd)
{
	int ret = 0;

	if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
		return 0;

	if (hws_gws_support || (KFD_IS_SOC15(kfd) &&
		((KFD_GC_VERSION(kfd) == IP_VERSION(9, 0, 1)
			&& kfd->mec2_fw_version >= 0x81b3) ||
		(KFD_GC_VERSION(kfd) <= IP_VERSION(9, 4, 0)
			&& kfd->mec2_fw_version >= 0x1b3)  ||
		(KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)
			&& kfd->mec2_fw_version >= 0x30)   ||
		(KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)
			&& kfd->mec2_fw_version >= 0x28))))
		ret = amdgpu_amdkfd_alloc_gws(kfd->adev,
				kfd->adev->gds.gws_size, &kfd->gws);

	return ret;
}

static void kfd_smi_init(struct kfd_dev *dev)
{
	INIT_LIST_HEAD(&dev->smi_clients);
	spin_lock_init(&dev->smi_lock);
}

bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 struct drm_device *ddev,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	unsigned int size, map_process_packet_size;

	kfd->ddev = ddev;
	kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_MEC1);
	kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_MEC2);
	kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_SDMA1);
	kfd->shared_resources = *gpu_resources;

	kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
			- kfd->vm_info.first_vmid_kfd + 1;

	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
	 * 32 and 64-bit requests are possible and must be
	 * supported.
	 */
	kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev);
	if (!kfd->pci_atomic_requested &&
	    kfd->device_info.needs_pci_atomics &&
	    (!kfd->device_info.no_atomic_fw_version ||
	     kfd->mec_fw_version < kfd->device_info.no_atomic_fw_version)) {
		dev_info(kfd_device,
			 "skipped device %x:%x, PCI rejects atomics %d<%d\n",
			 kfd->pdev->vendor, kfd->pdev->device,
			 kfd->mec_fw_version,
			 kfd->device_info.no_atomic_fw_version);
		return false;
	}

	/* Verify module parameters regarding mapped process number*/
	if (hws_max_conc_proc >= 0)
		kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd);
	else
		kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;

	/* calculate max size of mqds needed for queues */
	size = max_num_of_queues_per_device *
			kfd->device_info.mqd_size_aligned;

	/*
	 * calculate max size of runlist packet.
	 * There can be only 2 packets at once
	 */
	map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ?
				sizeof(struct pm4_mes_map_process_aldebaran) :
				sizeof(struct pm4_mes_map_process);
	size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size +
		max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
		+ sizeof(struct pm4_mes_runlist)) * 2;

	/* Add size of HIQ & DIQ */
	size += KFD_KERNEL_QUEUE_SIZE * 2;

	/* add another 512KB for all other allocations on gart (HPD, fences) */
	size += 512 * 1024;

	if (amdgpu_amdkfd_alloc_gtt_mem(
			kfd->adev, size, &kfd->gtt_mem,
			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
			false)) {
		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
		goto alloc_gtt_mem_failure;
	}

	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);

	/* Initialize GTT sa with 512 byte chunk size */
	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
		goto kfd_gtt_sa_init_error;
	}

	if (kfd_doorbell_init(kfd)) {
		dev_err(kfd_device,
			"Error initializing doorbell aperture\n");
		goto kfd_doorbell_error;
	}

	if (amdgpu_use_xgmi_p2p)
		kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;

	kfd->noretry = kfd->adev->gmc.noretry;

	if (kfd_interrupt_init(kfd)) {
		dev_err(kfd_device, "Error initializing interrupts\n");
		goto kfd_interrupt_error;
	}

	kfd->dqm = device_queue_manager_init(kfd);
	if (!kfd->dqm) {
		dev_err(kfd_device, "Error initializing queue manager\n");
		goto device_queue_manager_error;
	}

	/* If supported on this device, allocate global GWS that is shared
	 * by all KFD processes
	 */
	if (kfd_gws_init(kfd)) {
		dev_err(kfd_device, "Could not allocate %d gws\n",
			kfd->adev->gds.gws_size);
		goto gws_error;
	}

	/* If CRAT is broken, won't set iommu enabled */
	kfd_double_confirm_iommu_support(kfd);

	if (kfd_iommu_device_init(kfd)) {
		kfd->use_iommu_v2 = false;
		dev_err(kfd_device, "Error initializing iommuv2\n");
		goto device_iommu_error;
	}

	kfd_cwsr_init(kfd);

	svm_migrate_init(kfd->adev);

	if (kgd2kfd_resume_iommu(kfd))
		goto device_iommu_error;

	if (kfd_resume(kfd))
		goto kfd_resume_error;

	amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info);

	if (kfd_topology_add_device(kfd)) {
		dev_err(kfd_device, "Error adding device to topology\n");
		goto kfd_topology_add_device_error;
	}

	kfd_smi_init(kfd);

	kfd->init_complete = true;
	dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
		 kfd->pdev->device);

	pr_debug("Starting kfd with the following scheduling policy %d\n",
		kfd->dqm->sched_policy);

	goto out;

kfd_topology_add_device_error:
kfd_resume_error:
device_iommu_error:
gws_error:
	device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
	kfd_interrupt_exit(kfd);
kfd_interrupt_error:
	kfd_doorbell_fini(kfd);
kfd_doorbell_error:
	kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
	amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
alloc_gtt_mem_failure:
	if (kfd->gws)
		amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
	dev_err(kfd_device,
		"device %x:%x NOT added due to errors\n",
		kfd->pdev->vendor, kfd->pdev->device);
out:
	return kfd->init_complete;
}

void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
	if (kfd->init_complete) {
		device_queue_manager_uninit(kfd->dqm);
		kfd_interrupt_exit(kfd);
		kfd_topology_remove_device(kfd);
		kfd_doorbell_fini(kfd);
		ida_destroy(&kfd->doorbell_ida);
		kfd_gtt_sa_fini(kfd);
		amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
		if (kfd->gws)
			amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
	}

	kfree(kfd);
}

int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return 0;

	kfd_smi_event_update_gpu_reset(kfd, false);

	kfd->dqm->ops.pre_reset(kfd->dqm);

	kgd2kfd_suspend(kfd, false);

	kfd_signal_reset_event(kfd);
	return 0;
}

/*
 * Fix me. KFD won't be able to resume existing processes for now.
 * We will keep all existing processes in an evicted state and
 * wait for them to be terminated.
 */

int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	int ret;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;
	atomic_dec(&kfd_locked);

	atomic_set(&kfd->sram_ecc_flag, 0);

	kfd_smi_event_update_gpu_reset(kfd, true);

	return 0;
}

bool kfd_is_locked(void)
{
	return (atomic_read(&kfd_locked) > 0);
}

void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
	if (!kfd->init_complete)
		return;

	/* for runtime suspend, skip locking kfd */
	if (!run_pm) {
		/* For first KFD device suspend all the KFD processes */
		if (atomic_inc_return(&kfd_locked) == 1)
			kfd_suspend_all_processes();
	}

	kfd->dqm->ops.stop(kfd->dqm);
	kfd_iommu_suspend(kfd);
}

int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	int ret, count;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;

	/* for runtime resume, skip unlocking kfd */
	if (!run_pm) {
		count = atomic_dec_return(&kfd_locked);
		WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
		if (count == 0)
			ret = kfd_resume_all_processes();
	}

	return ret;
}

int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
{
	int err = 0;

	err = kfd_iommu_resume(kfd);
	if (err)
		dev_err(kfd_device,
			"Failed to resume IOMMU for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
	return err;
}

static int kfd_resume(struct kfd_dev *kfd)
{
	int err = 0;

	err = kfd->dqm->ops.start(kfd->dqm);
	if (err)
		dev_err(kfd_device,
			"Error starting queue manager for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);

	return err;
}
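
/*
 * Queue interrupt bottom-half work on a CPU that shares a NUMA node with the
 * CPU that took the interrupt: walk the online CPUs starting after the
 * current one and stop at the first CPU on the local node (possibly wrapping
 * back to the current CPU itself).
 */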
static inline void kfd_queue_work(struct workqueue_struct *wq,
				  struct work_struct *work)
{
	int cpu, new_cpu;

	cpu = new_cpu = smp_processor_id();
	do {
		new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
		if (cpu_to_node(new_cpu) == numa_node_id())
			break;
	} while (cpu != new_cpu);

	queue_work_on(new_cpu, wq, work);
}

/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
	bool is_patched = false;
	unsigned long flags;

	if (!kfd->init_complete)
		return;

	if (kfd->device_info.ih_ring_entry_size > sizeof(patched_ihre)) {
		dev_err_once(kfd_device, "Ring entry too small\n");
		return;
	}

	spin_lock_irqsave(&kfd->interrupt_lock, flags);

	if (kfd->interrupts_active
	    && interrupt_is_wanted(kfd, ih_ring_entry,
				   patched_ihre, &is_patched)
	    && enqueue_ih_ring_entry(kfd,
				     is_patched ? patched_ihre : ih_ring_entry))
		kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work);

	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
}

int kgd2kfd_quiesce_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
	r = kfd_process_evict_queues(p);

	kfd_unref_process(p);
	return r;
}

int kgd2kfd_resume_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_restore_queues(p);

	kfd_unref_process(p);
	return r;
}

/** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
 *   prepare for safe eviction of KFD BOs that belong to the specified
 *   process.
 *
 * @mm: mm_struct that identifies the specified KFD process
 * @fence: eviction fence attached to KFD process BOs
 *
 */
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					       struct dma_fence *fence)
{
	struct kfd_process *p;
	unsigned long active_time;
	unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);

	if (!fence)
		return -EINVAL;

	if (dma_fence_is_signaled(fence))
		return 0;

	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ENODEV;

	if (fence->seqno == p->last_eviction_seqno)
		goto out;

	p->last_eviction_seqno = fence->seqno;

	/* Avoid KFD process starvation. Wait for at least
	 * PROCESS_ACTIVE_TIME_MS before evicting the process again
	 */
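	/* In other words, defer the eviction by however much of
	 * PROCESS_ACTIVE_TIME_MS the process has not yet used since its last
	 * restore; if it has already run at least that long, evict right away.
	 */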
	active_time = get_jiffies_64() - p->last_restore_timestamp;
	if (delay_jiffies > active_time)
		delay_jiffies -= active_time;
	else
		delay_jiffies = 0;

	/* During process initialization eviction_work.dwork is initialized
	 * to kfd_evict_bo_worker
	 */
	WARN(debug_evictions, "Scheduling eviction of pid %d in %ld jiffies",
	     p->lead_thread->pid, delay_jiffies);
	schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
	kfd_unref_process(p);
	return 0;
}

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size)
{
	if (WARN_ON(buf_size < chunk_size))
		return -EINVAL;
	if (WARN_ON(buf_size == 0))
		return -EINVAL;
	if (WARN_ON(chunk_size == 0))
		return -EINVAL;

	kfd->gtt_sa_chunk_size = chunk_size;
	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

	kfd->gtt_sa_bitmap = bitmap_zalloc(kfd->gtt_sa_num_of_chunks,
					   GFP_KERNEL);
	if (!kfd->gtt_sa_bitmap)
		return -ENOMEM;

	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

	mutex_init(&kfd->gtt_sa_lock);

	return 0;
}

static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
	mutex_destroy(&kfd->gtt_sa_lock);
	bitmap_free(kfd->gtt_sa_bitmap);
}

static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}

int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj)
{
	unsigned int found, start_search, cur_size;

	if (size == 0)
		return -EINVAL;

	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
		return -ENOMEM;

	*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!(*mem_obj))
		return -ENOMEM;

	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);

	start_search = 0;

	mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
	/* Find the first chunk that is free */
	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks,
					start_search);

	pr_debug("Found = %d\n", found);

	/* If there wasn't any free chunk, bail out */
	if (found == kfd->gtt_sa_num_of_chunks)
		goto kfd_gtt_no_free_chunk;

	/* Update fields of mem_obj */
	(*mem_obj)->range_start = found;
	(*mem_obj)->range_end = found;
	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
					kfd->gtt_start_gpu_addr,
					found,
					kfd->gtt_sa_chunk_size);
	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
					kfd->gtt_start_cpu_ptr,
					found,
					kfd->gtt_sa_chunk_size);

	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

	/* If we need only one chunk, mark it as allocated and get out */
	if (size <= kfd->gtt_sa_chunk_size) {
		pr_debug("Single bit\n");
		__set_bit(found, kfd->gtt_sa_bitmap);
		goto kfd_gtt_out;
	}

	/* Otherwise, try to see if we have enough contiguous chunks */
	cur_size = size - kfd->gtt_sa_chunk_size;
	do {
		(*mem_obj)->range_end =
			find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks, ++found);
		/*
		 * If the next free chunk is not contiguous then we need to
		 * restart our search from the last free chunk we found (which
		 * wasn't contiguous to the previous ones)
		 */
		if ((*mem_obj)->range_end != found) {
			start_search = found;
			goto kfd_gtt_restart_search;
		}

		/*
		 * If we reached end of buffer, bail out with error
		 */
		if (found == kfd->gtt_sa_num_of_chunks)
			goto kfd_gtt_no_free_chunk;

		/* Check if we don't need another chunk */
		if (cur_size <= kfd->gtt_sa_chunk_size)
			cur_size = 0;
		else
			cur_size -= kfd->gtt_sa_chunk_size;

	} while (cur_size > 0);

	pr_debug("range_start = %d, range_end = %d\n",
		(*mem_obj)->range_start, (*mem_obj)->range_end);

	/* Mark the chunks as allocated */
	bitmap_set(kfd->gtt_sa_bitmap, (*mem_obj)->range_start,
		   (*mem_obj)->range_end - (*mem_obj)->range_start + 1);

kfd_gtt_out:
	mutex_unlock(&kfd->gtt_sa_lock);
	return 0;

kfd_gtt_no_free_chunk:
	pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
	mutex_unlock(&kfd->gtt_sa_lock);
	kfree(*mem_obj);
	return -ENOMEM;
}

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
	/* Act like kfree when trying to free a NULL object */
	if (!mem_obj)
		return 0;

	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
			mem_obj, mem_obj->range_start, mem_obj->range_end);

	mutex_lock(&kfd->gtt_sa_lock);

	/* Mark the chunks as free */
	bitmap_clear(kfd->gtt_sa_bitmap, mem_obj->range_start,
		     mem_obj->range_end - mem_obj->range_start + 1);

	mutex_unlock(&kfd->gtt_sa_lock);

	kfree(mem_obj);
	return 0;
}

void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
	if (kfd)
		atomic_inc(&kfd->sram_ecc_flag);
}

void kfd_inc_compute_active(struct kfd_dev *kfd)
{
	if (atomic_inc_return(&kfd->compute_profile) == 1)
		amdgpu_amdkfd_set_compute_idle(kfd->adev, false);
}

void kfd_dec_compute_active(struct kfd_dev *kfd)
{
	int count = atomic_dec_return(&kfd->compute_profile);

	if (count == 0)
		amdgpu_amdkfd_set_compute_idle(kfd->adev, true);
	WARN_ONCE(count < 0, "Compute profile ref. count error");
}

void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
	if (kfd && kfd->init_complete)
		kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask);
}

/* kfd_get_num_sdma_engines returns the number of PCIe optimized SDMA and
 * kfd_get_num_xgmi_sdma_engines returns the number of XGMI SDMA.
 * When the device has more than two engines, we reserve two for PCIe to enable
 * full-duplex and the rest are used as XGMI.
 */
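/* For example, on an XGMI-capable device with 8 SDMA instances, engines 0-1
 * are treated as the PCIe-optimized pair and the remaining 6 are used for
 * XGMI traffic.
 */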
unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev)
{
	/* If XGMI is not supported, all SDMA engines are PCIe */
	if (!kdev->adev->gmc.xgmi.supported)
		return kdev->adev->sdma.num_instances;

	return min(kdev->adev->sdma.num_instances, 2);
}

unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev)
{
	/* After reserving for PCIe, the rest of the engines are XGMI */
	return kdev->adev->sdma.num_instances - kfd_get_num_sdma_engines(kdev);
}

#if defined(CONFIG_DEBUG_FS)

/* This function will send a packet to HIQ to hang the HWS
 * which will trigger a GPU reset and bring the HWS back to normal state
 */
int kfd_debugfs_hang_hws(struct kfd_dev *dev)
{
	if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
		pr_err("HWS is not enabled");
		return -EINVAL;
	}

	return dqm_debugfs_hang_hws(dev->dqm);
}

#endif