1 /* 2 * Copyright 2014 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 */ 22 23 #include <linux/bsearch.h> 24 #include <linux/pci.h> 25 #include <linux/slab.h> 26 #include "kfd_priv.h" 27 #include "kfd_device_queue_manager.h" 28 #include "kfd_pm4_headers_vi.h" 29 #include "cwsr_trap_handler.h" 30 #include "kfd_iommu.h" 31 32 #define MQD_SIZE_ALIGNED 768 33 static atomic_t kfd_device_suspended = ATOMIC_INIT(0); 34 35 #ifdef KFD_SUPPORT_IOMMU_V2 36 static const struct kfd_device_info kaveri_device_info = { 37 .asic_family = CHIP_KAVERI, 38 .max_pasid_bits = 16, 39 /* max num of queues for KV.TODO should be a dynamic value */ 40 .max_no_of_hqd = 24, 41 .doorbell_size = 4, 42 .ih_ring_entry_size = 4 * sizeof(uint32_t), 43 .event_interrupt_class = &event_interrupt_class_cik, 44 .num_of_watch_points = 4, 45 .mqd_size_aligned = MQD_SIZE_ALIGNED, 46 .supports_cwsr = false, 47 .needs_iommu_device = true, 48 .needs_pci_atomics = false, 49 }; 50 51 static const struct kfd_device_info carrizo_device_info = { 52 .asic_family = CHIP_CARRIZO, 53 .max_pasid_bits = 16, 54 /* max num of queues for CZ.TODO should be a dynamic value */ 55 .max_no_of_hqd = 24, 56 .doorbell_size = 4, 57 .ih_ring_entry_size = 4 * sizeof(uint32_t), 58 .event_interrupt_class = &event_interrupt_class_cik, 59 .num_of_watch_points = 4, 60 .mqd_size_aligned = MQD_SIZE_ALIGNED, 61 .supports_cwsr = true, 62 .needs_iommu_device = true, 63 .needs_pci_atomics = false, 64 }; 65 #endif 66 67 static const struct kfd_device_info hawaii_device_info = { 68 .asic_family = CHIP_HAWAII, 69 .max_pasid_bits = 16, 70 /* max num of queues for KV.TODO should be a dynamic value */ 71 .max_no_of_hqd = 24, 72 .doorbell_size = 4, 73 .ih_ring_entry_size = 4 * sizeof(uint32_t), 74 .event_interrupt_class = &event_interrupt_class_cik, 75 .num_of_watch_points = 4, 76 .mqd_size_aligned = MQD_SIZE_ALIGNED, 77 .supports_cwsr = false, 78 .needs_iommu_device = false, 79 .needs_pci_atomics = false, 80 }; 81 82 static const struct kfd_device_info tonga_device_info = { 83 .asic_family = CHIP_TONGA, 84 .max_pasid_bits = 16, 85 .max_no_of_hqd = 24, 86 .doorbell_size = 4, 87 .ih_ring_entry_size = 4 * sizeof(uint32_t), 88 .event_interrupt_class = &event_interrupt_class_cik, 89 .num_of_watch_points = 4, 90 .mqd_size_aligned = MQD_SIZE_ALIGNED, 91 .supports_cwsr = false, 92 .needs_iommu_device = false, 93 .needs_pci_atomics = true, 94 }; 95 96 static const struct kfd_device_info tonga_vf_device_info = { 97 .asic_family = CHIP_TONGA, 98 .max_pasid_bits = 16, 99 .max_no_of_hqd = 24, 100 .doorbell_size = 4, 101 .ih_ring_entry_size = 4 * sizeof(uint32_t), 102 .event_interrupt_class = &event_interrupt_class_cik, 103 .num_of_watch_points = 4, 104 .mqd_size_aligned = MQD_SIZE_ALIGNED, 105 .supports_cwsr = false, 106 .needs_iommu_device = false, 107 .needs_pci_atomics = false, 108 }; 109 110 static const struct kfd_device_info fiji_device_info = { 111 .asic_family = CHIP_FIJI, 112 .max_pasid_bits = 16, 113 .max_no_of_hqd = 24, 114 .doorbell_size = 4, 115 .ih_ring_entry_size = 4 * sizeof(uint32_t), 116 .event_interrupt_class = &event_interrupt_class_cik, 117 .num_of_watch_points = 4, 118 .mqd_size_aligned = MQD_SIZE_ALIGNED, 119 .supports_cwsr = true, 120 .needs_iommu_device = false, 121 .needs_pci_atomics = true, 122 }; 123 124 static const struct kfd_device_info fiji_vf_device_info = { 125 .asic_family = CHIP_FIJI, 126 .max_pasid_bits = 16, 127 .max_no_of_hqd = 24, 128 .doorbell_size = 4, 129 .ih_ring_entry_size = 4 * sizeof(uint32_t), 130 .event_interrupt_class = &event_interrupt_class_cik, 131 .num_of_watch_points = 4, 132 .mqd_size_aligned = MQD_SIZE_ALIGNED, 133 .supports_cwsr = true, 134 .needs_iommu_device = false, 135 .needs_pci_atomics = false, 136 }; 137 138 139 static const struct kfd_device_info polaris10_device_info = { 140 .asic_family = CHIP_POLARIS10, 141 .max_pasid_bits = 16, 142 .max_no_of_hqd = 24, 143 .doorbell_size = 4, 144 .ih_ring_entry_size = 4 * sizeof(uint32_t), 145 .event_interrupt_class = &event_interrupt_class_cik, 146 .num_of_watch_points = 4, 147 .mqd_size_aligned = MQD_SIZE_ALIGNED, 148 .supports_cwsr = true, 149 .needs_iommu_device = false, 150 .needs_pci_atomics = true, 151 }; 152 153 static const struct kfd_device_info polaris10_vf_device_info = { 154 .asic_family = CHIP_POLARIS10, 155 .max_pasid_bits = 16, 156 .max_no_of_hqd = 24, 157 .doorbell_size = 4, 158 .ih_ring_entry_size = 4 * sizeof(uint32_t), 159 .event_interrupt_class = &event_interrupt_class_cik, 160 .num_of_watch_points = 4, 161 .mqd_size_aligned = MQD_SIZE_ALIGNED, 162 .supports_cwsr = true, 163 .needs_iommu_device = false, 164 .needs_pci_atomics = false, 165 }; 166 167 static const struct kfd_device_info polaris11_device_info = { 168 .asic_family = CHIP_POLARIS11, 169 .max_pasid_bits = 16, 170 .max_no_of_hqd = 24, 171 .doorbell_size = 4, 172 .ih_ring_entry_size = 4 * sizeof(uint32_t), 173 .event_interrupt_class = &event_interrupt_class_cik, 174 .num_of_watch_points = 4, 175 .mqd_size_aligned = MQD_SIZE_ALIGNED, 176 .supports_cwsr = true, 177 .needs_iommu_device = false, 178 .needs_pci_atomics = true, 179 }; 180 181 static const struct kfd_device_info vega10_device_info = { 182 .asic_family = CHIP_VEGA10, 183 .max_pasid_bits = 16, 184 .max_no_of_hqd = 24, 185 .doorbell_size = 8, 186 .ih_ring_entry_size = 8 * sizeof(uint32_t), 187 .event_interrupt_class = &event_interrupt_class_v9, 188 .num_of_watch_points = 4, 189 .mqd_size_aligned = MQD_SIZE_ALIGNED, 190 .supports_cwsr = true, 191 .needs_iommu_device = false, 192 .needs_pci_atomics = false, 193 }; 194 195 static const struct kfd_device_info vega10_vf_device_info = { 196 .asic_family = CHIP_VEGA10, 197 .max_pasid_bits = 16, 198 .max_no_of_hqd = 24, 199 .doorbell_size = 8, 200 .ih_ring_entry_size = 8 * sizeof(uint32_t), 201 .event_interrupt_class = &event_interrupt_class_v9, 202 .num_of_watch_points = 4, 203 .mqd_size_aligned = MQD_SIZE_ALIGNED, 204 .supports_cwsr = true, 205 .needs_iommu_device = false, 206 .needs_pci_atomics = false, 207 }; 208 209 210 struct kfd_deviceid { 211 unsigned short did; 212 const struct kfd_device_info *device_info; 213 }; 214 215 static const struct kfd_deviceid supported_devices[] = { 216 #ifdef KFD_SUPPORT_IOMMU_V2 217 { 0x1304, &kaveri_device_info }, /* Kaveri */ 218 { 0x1305, &kaveri_device_info }, /* Kaveri */ 219 { 0x1306, &kaveri_device_info }, /* Kaveri */ 220 { 0x1307, &kaveri_device_info }, /* Kaveri */ 221 { 0x1309, &kaveri_device_info }, /* Kaveri */ 222 { 0x130A, &kaveri_device_info }, /* Kaveri */ 223 { 0x130B, &kaveri_device_info }, /* Kaveri */ 224 { 0x130C, &kaveri_device_info }, /* Kaveri */ 225 { 0x130D, &kaveri_device_info }, /* Kaveri */ 226 { 0x130E, &kaveri_device_info }, /* Kaveri */ 227 { 0x130F, &kaveri_device_info }, /* Kaveri */ 228 { 0x1310, &kaveri_device_info }, /* Kaveri */ 229 { 0x1311, &kaveri_device_info }, /* Kaveri */ 230 { 0x1312, &kaveri_device_info }, /* Kaveri */ 231 { 0x1313, &kaveri_device_info }, /* Kaveri */ 232 { 0x1315, &kaveri_device_info }, /* Kaveri */ 233 { 0x1316, &kaveri_device_info }, /* Kaveri */ 234 { 0x1317, &kaveri_device_info }, /* Kaveri */ 235 { 0x1318, &kaveri_device_info }, /* Kaveri */ 236 { 0x131B, &kaveri_device_info }, /* Kaveri */ 237 { 0x131C, &kaveri_device_info }, /* Kaveri */ 238 { 0x131D, &kaveri_device_info }, /* Kaveri */ 239 { 0x9870, &carrizo_device_info }, /* Carrizo */ 240 { 0x9874, &carrizo_device_info }, /* Carrizo */ 241 { 0x9875, &carrizo_device_info }, /* Carrizo */ 242 { 0x9876, &carrizo_device_info }, /* Carrizo */ 243 { 0x9877, &carrizo_device_info }, /* Carrizo */ 244 #endif 245 { 0x67A0, &hawaii_device_info }, /* Hawaii */ 246 { 0x67A1, &hawaii_device_info }, /* Hawaii */ 247 { 0x67A2, &hawaii_device_info }, /* Hawaii */ 248 { 0x67A8, &hawaii_device_info }, /* Hawaii */ 249 { 0x67A9, &hawaii_device_info }, /* Hawaii */ 250 { 0x67AA, &hawaii_device_info }, /* Hawaii */ 251 { 0x67B0, &hawaii_device_info }, /* Hawaii */ 252 { 0x67B1, &hawaii_device_info }, /* Hawaii */ 253 { 0x67B8, &hawaii_device_info }, /* Hawaii */ 254 { 0x67B9, &hawaii_device_info }, /* Hawaii */ 255 { 0x67BA, &hawaii_device_info }, /* Hawaii */ 256 { 0x67BE, &hawaii_device_info }, /* Hawaii */ 257 { 0x6920, &tonga_device_info }, /* Tonga */ 258 { 0x6921, &tonga_device_info }, /* Tonga */ 259 { 0x6928, &tonga_device_info }, /* Tonga */ 260 { 0x6929, &tonga_device_info }, /* Tonga */ 261 { 0x692B, &tonga_device_info }, /* Tonga */ 262 { 0x692F, &tonga_vf_device_info }, /* Tonga vf */ 263 { 0x6938, &tonga_device_info }, /* Tonga */ 264 { 0x6939, &tonga_device_info }, /* Tonga */ 265 { 0x7300, &fiji_device_info }, /* Fiji */ 266 { 0x730F, &fiji_vf_device_info }, /* Fiji vf*/ 267 { 0x67C0, &polaris10_device_info }, /* Polaris10 */ 268 { 0x67C1, &polaris10_device_info }, /* Polaris10 */ 269 { 0x67C2, &polaris10_device_info }, /* Polaris10 */ 270 { 0x67C4, &polaris10_device_info }, /* Polaris10 */ 271 { 0x67C7, &polaris10_device_info }, /* Polaris10 */ 272 { 0x67C8, &polaris10_device_info }, /* Polaris10 */ 273 { 0x67C9, &polaris10_device_info }, /* Polaris10 */ 274 { 0x67CA, &polaris10_device_info }, /* Polaris10 */ 275 { 0x67CC, &polaris10_device_info }, /* Polaris10 */ 276 { 0x67CF, &polaris10_device_info }, /* Polaris10 */ 277 { 0x67D0, &polaris10_vf_device_info }, /* Polaris10 vf*/ 278 { 0x67DF, &polaris10_device_info }, /* Polaris10 */ 279 { 0x67E0, &polaris11_device_info }, /* Polaris11 */ 280 { 0x67E1, &polaris11_device_info }, /* Polaris11 */ 281 { 0x67E3, &polaris11_device_info }, /* Polaris11 */ 282 { 0x67E7, &polaris11_device_info }, /* Polaris11 */ 283 { 0x67E8, &polaris11_device_info }, /* Polaris11 */ 284 { 0x67E9, &polaris11_device_info }, /* Polaris11 */ 285 { 0x67EB, &polaris11_device_info }, /* Polaris11 */ 286 { 0x67EF, &polaris11_device_info }, /* Polaris11 */ 287 { 0x67FF, &polaris11_device_info }, /* Polaris11 */ 288 { 0x6860, &vega10_device_info }, /* Vega10 */ 289 { 0x6861, &vega10_device_info }, /* Vega10 */ 290 { 0x6862, &vega10_device_info }, /* Vega10 */ 291 { 0x6863, &vega10_device_info }, /* Vega10 */ 292 { 0x6864, &vega10_device_info }, /* Vega10 */ 293 { 0x6867, &vega10_device_info }, /* Vega10 */ 294 { 0x6868, &vega10_device_info }, /* Vega10 */ 295 { 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/ 296 { 0x687F, &vega10_device_info }, /* Vega10 */ 297 }; 298 299 static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, 300 unsigned int chunk_size); 301 static void kfd_gtt_sa_fini(struct kfd_dev *kfd); 302 303 static int kfd_resume(struct kfd_dev *kfd); 304 305 static const struct kfd_device_info *lookup_device_info(unsigned short did) 306 { 307 size_t i; 308 309 for (i = 0; i < ARRAY_SIZE(supported_devices); i++) { 310 if (supported_devices[i].did == did) { 311 WARN_ON(!supported_devices[i].device_info); 312 return supported_devices[i].device_info; 313 } 314 } 315 316 dev_warn(kfd_device, "DID %04x is missing in supported_devices\n", 317 did); 318 319 return NULL; 320 } 321 322 struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, 323 struct pci_dev *pdev, const struct kfd2kgd_calls *f2g) 324 { 325 struct kfd_dev *kfd; 326 int ret; 327 const struct kfd_device_info *device_info = 328 lookup_device_info(pdev->device); 329 330 if (!device_info) { 331 dev_err(kfd_device, "kgd2kfd_probe failed\n"); 332 return NULL; 333 } 334 335 /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps. 336 * 32 and 64-bit requests are possible and must be 337 * supported. 338 */ 339 ret = pci_enable_atomic_ops_to_root(pdev, 340 PCI_EXP_DEVCAP2_ATOMIC_COMP32 | 341 PCI_EXP_DEVCAP2_ATOMIC_COMP64); 342 if (device_info->needs_pci_atomics && ret < 0) { 343 dev_info(kfd_device, 344 "skipped device %x:%x, PCI rejects atomics\n", 345 pdev->vendor, pdev->device); 346 return NULL; 347 } 348 349 kfd = kzalloc(sizeof(*kfd), GFP_KERNEL); 350 if (!kfd) 351 return NULL; 352 353 kfd->kgd = kgd; 354 kfd->device_info = device_info; 355 kfd->pdev = pdev; 356 kfd->init_complete = false; 357 kfd->kfd2kgd = f2g; 358 359 mutex_init(&kfd->doorbell_mutex); 360 memset(&kfd->doorbell_available_index, 0, 361 sizeof(kfd->doorbell_available_index)); 362 363 return kfd; 364 } 365 366 static void kfd_cwsr_init(struct kfd_dev *kfd) 367 { 368 if (cwsr_enable && kfd->device_info->supports_cwsr) { 369 if (kfd->device_info->asic_family < CHIP_VEGA10) { 370 BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE); 371 kfd->cwsr_isa = cwsr_trap_gfx8_hex; 372 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex); 373 } else { 374 BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE); 375 kfd->cwsr_isa = cwsr_trap_gfx9_hex; 376 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex); 377 } 378 379 kfd->cwsr_enabled = true; 380 } 381 } 382 383 bool kgd2kfd_device_init(struct kfd_dev *kfd, 384 const struct kgd2kfd_shared_resources *gpu_resources) 385 { 386 unsigned int size; 387 388 kfd->shared_resources = *gpu_resources; 389 390 kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1; 391 kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1; 392 kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd 393 - kfd->vm_info.first_vmid_kfd + 1; 394 395 /* Verify module parameters regarding mapped process number*/ 396 if ((hws_max_conc_proc < 0) 397 || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) { 398 dev_err(kfd_device, 399 "hws_max_conc_proc %d must be between 0 and %d, use %d instead\n", 400 hws_max_conc_proc, kfd->vm_info.vmid_num_kfd, 401 kfd->vm_info.vmid_num_kfd); 402 kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd; 403 } else 404 kfd->max_proc_per_quantum = hws_max_conc_proc; 405 406 /* calculate max size of mqds needed for queues */ 407 size = max_num_of_queues_per_device * 408 kfd->device_info->mqd_size_aligned; 409 410 /* 411 * calculate max size of runlist packet. 412 * There can be only 2 packets at once 413 */ 414 size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_mes_map_process) + 415 max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues) 416 + sizeof(struct pm4_mes_runlist)) * 2; 417 418 /* Add size of HIQ & DIQ */ 419 size += KFD_KERNEL_QUEUE_SIZE * 2; 420 421 /* add another 512KB for all other allocations on gart (HPD, fences) */ 422 size += 512 * 1024; 423 424 if (kfd->kfd2kgd->init_gtt_mem_allocation( 425 kfd->kgd, size, &kfd->gtt_mem, 426 &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){ 427 dev_err(kfd_device, "Could not allocate %d bytes\n", size); 428 goto out; 429 } 430 431 dev_info(kfd_device, "Allocated %d bytes on gart\n", size); 432 433 /* Initialize GTT sa with 512 byte chunk size */ 434 if (kfd_gtt_sa_init(kfd, size, 512) != 0) { 435 dev_err(kfd_device, "Error initializing gtt sub-allocator\n"); 436 goto kfd_gtt_sa_init_error; 437 } 438 439 if (kfd_doorbell_init(kfd)) { 440 dev_err(kfd_device, 441 "Error initializing doorbell aperture\n"); 442 goto kfd_doorbell_error; 443 } 444 445 if (kfd_topology_add_device(kfd)) { 446 dev_err(kfd_device, "Error adding device to topology\n"); 447 goto kfd_topology_add_device_error; 448 } 449 450 if (kfd_interrupt_init(kfd)) { 451 dev_err(kfd_device, "Error initializing interrupts\n"); 452 goto kfd_interrupt_error; 453 } 454 455 kfd->dqm = device_queue_manager_init(kfd); 456 if (!kfd->dqm) { 457 dev_err(kfd_device, "Error initializing queue manager\n"); 458 goto device_queue_manager_error; 459 } 460 461 if (kfd_iommu_device_init(kfd)) { 462 dev_err(kfd_device, "Error initializing iommuv2\n"); 463 goto device_iommu_error; 464 } 465 466 kfd_cwsr_init(kfd); 467 468 if (kfd_resume(kfd)) 469 goto kfd_resume_error; 470 471 kfd->dbgmgr = NULL; 472 473 kfd->init_complete = true; 474 dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor, 475 kfd->pdev->device); 476 477 pr_debug("Starting kfd with the following scheduling policy %d\n", 478 kfd->dqm->sched_policy); 479 480 goto out; 481 482 kfd_resume_error: 483 device_iommu_error: 484 device_queue_manager_uninit(kfd->dqm); 485 device_queue_manager_error: 486 kfd_interrupt_exit(kfd); 487 kfd_interrupt_error: 488 kfd_topology_remove_device(kfd); 489 kfd_topology_add_device_error: 490 kfd_doorbell_fini(kfd); 491 kfd_doorbell_error: 492 kfd_gtt_sa_fini(kfd); 493 kfd_gtt_sa_init_error: 494 kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem); 495 dev_err(kfd_device, 496 "device %x:%x NOT added due to errors\n", 497 kfd->pdev->vendor, kfd->pdev->device); 498 out: 499 return kfd->init_complete; 500 } 501 502 void kgd2kfd_device_exit(struct kfd_dev *kfd) 503 { 504 if (kfd->init_complete) { 505 kgd2kfd_suspend(kfd); 506 device_queue_manager_uninit(kfd->dqm); 507 kfd_interrupt_exit(kfd); 508 kfd_topology_remove_device(kfd); 509 kfd_doorbell_fini(kfd); 510 kfd_gtt_sa_fini(kfd); 511 kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem); 512 } 513 514 kfree(kfd); 515 } 516 517 void kgd2kfd_suspend(struct kfd_dev *kfd) 518 { 519 if (!kfd->init_complete) 520 return; 521 522 /* For first KFD device suspend all the KFD processes */ 523 if (atomic_inc_return(&kfd_device_suspended) == 1) 524 kfd_suspend_all_processes(); 525 526 kfd->dqm->ops.stop(kfd->dqm); 527 528 kfd_iommu_suspend(kfd); 529 } 530 531 int kgd2kfd_resume(struct kfd_dev *kfd) 532 { 533 int ret, count; 534 535 if (!kfd->init_complete) 536 return 0; 537 538 ret = kfd_resume(kfd); 539 if (ret) 540 return ret; 541 542 count = atomic_dec_return(&kfd_device_suspended); 543 WARN_ONCE(count < 0, "KFD suspend / resume ref. error"); 544 if (count == 0) 545 ret = kfd_resume_all_processes(); 546 547 return ret; 548 } 549 550 static int kfd_resume(struct kfd_dev *kfd) 551 { 552 int err = 0; 553 554 err = kfd_iommu_resume(kfd); 555 if (err) { 556 dev_err(kfd_device, 557 "Failed to resume IOMMU for device %x:%x\n", 558 kfd->pdev->vendor, kfd->pdev->device); 559 return err; 560 } 561 562 err = kfd->dqm->ops.start(kfd->dqm); 563 if (err) { 564 dev_err(kfd_device, 565 "Error starting queue manager for device %x:%x\n", 566 kfd->pdev->vendor, kfd->pdev->device); 567 goto dqm_start_error; 568 } 569 570 return err; 571 572 dqm_start_error: 573 kfd_iommu_suspend(kfd); 574 return err; 575 } 576 577 /* This is called directly from KGD at ISR. */ 578 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) 579 { 580 if (!kfd->init_complete) 581 return; 582 583 spin_lock(&kfd->interrupt_lock); 584 585 if (kfd->interrupts_active 586 && interrupt_is_wanted(kfd, ih_ring_entry) 587 && enqueue_ih_ring_entry(kfd, ih_ring_entry)) 588 queue_work(kfd->ih_wq, &kfd->interrupt_work); 589 590 spin_unlock(&kfd->interrupt_lock); 591 } 592 593 int kgd2kfd_quiesce_mm(struct mm_struct *mm) 594 { 595 struct kfd_process *p; 596 int r; 597 598 /* Because we are called from arbitrary context (workqueue) as opposed 599 * to process context, kfd_process could attempt to exit while we are 600 * running so the lookup function increments the process ref count. 601 */ 602 p = kfd_lookup_process_by_mm(mm); 603 if (!p) 604 return -ESRCH; 605 606 r = kfd_process_evict_queues(p); 607 608 kfd_unref_process(p); 609 return r; 610 } 611 612 int kgd2kfd_resume_mm(struct mm_struct *mm) 613 { 614 struct kfd_process *p; 615 int r; 616 617 /* Because we are called from arbitrary context (workqueue) as opposed 618 * to process context, kfd_process could attempt to exit while we are 619 * running so the lookup function increments the process ref count. 620 */ 621 p = kfd_lookup_process_by_mm(mm); 622 if (!p) 623 return -ESRCH; 624 625 r = kfd_process_restore_queues(p); 626 627 kfd_unref_process(p); 628 return r; 629 } 630 631 /** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will 632 * prepare for safe eviction of KFD BOs that belong to the specified 633 * process. 634 * 635 * @mm: mm_struct that identifies the specified KFD process 636 * @fence: eviction fence attached to KFD process BOs 637 * 638 */ 639 int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm, 640 struct dma_fence *fence) 641 { 642 struct kfd_process *p; 643 unsigned long active_time; 644 unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS); 645 646 if (!fence) 647 return -EINVAL; 648 649 if (dma_fence_is_signaled(fence)) 650 return 0; 651 652 p = kfd_lookup_process_by_mm(mm); 653 if (!p) 654 return -ENODEV; 655 656 if (fence->seqno == p->last_eviction_seqno) 657 goto out; 658 659 p->last_eviction_seqno = fence->seqno; 660 661 /* Avoid KFD process starvation. Wait for at least 662 * PROCESS_ACTIVE_TIME_MS before evicting the process again 663 */ 664 active_time = get_jiffies_64() - p->last_restore_timestamp; 665 if (delay_jiffies > active_time) 666 delay_jiffies -= active_time; 667 else 668 delay_jiffies = 0; 669 670 /* During process initialization eviction_work.dwork is initialized 671 * to kfd_evict_bo_worker 672 */ 673 schedule_delayed_work(&p->eviction_work, delay_jiffies); 674 out: 675 kfd_unref_process(p); 676 return 0; 677 } 678 679 static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, 680 unsigned int chunk_size) 681 { 682 unsigned int num_of_longs; 683 684 if (WARN_ON(buf_size < chunk_size)) 685 return -EINVAL; 686 if (WARN_ON(buf_size == 0)) 687 return -EINVAL; 688 if (WARN_ON(chunk_size == 0)) 689 return -EINVAL; 690 691 kfd->gtt_sa_chunk_size = chunk_size; 692 kfd->gtt_sa_num_of_chunks = buf_size / chunk_size; 693 694 num_of_longs = (kfd->gtt_sa_num_of_chunks + BITS_PER_LONG - 1) / 695 BITS_PER_LONG; 696 697 kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL); 698 699 if (!kfd->gtt_sa_bitmap) 700 return -ENOMEM; 701 702 pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n", 703 kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap); 704 705 mutex_init(&kfd->gtt_sa_lock); 706 707 return 0; 708 709 } 710 711 static void kfd_gtt_sa_fini(struct kfd_dev *kfd) 712 { 713 mutex_destroy(&kfd->gtt_sa_lock); 714 kfree(kfd->gtt_sa_bitmap); 715 } 716 717 static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr, 718 unsigned int bit_num, 719 unsigned int chunk_size) 720 { 721 return start_addr + bit_num * chunk_size; 722 } 723 724 static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr, 725 unsigned int bit_num, 726 unsigned int chunk_size) 727 { 728 return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size); 729 } 730 731 int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size, 732 struct kfd_mem_obj **mem_obj) 733 { 734 unsigned int found, start_search, cur_size; 735 736 if (size == 0) 737 return -EINVAL; 738 739 if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size) 740 return -ENOMEM; 741 742 *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO); 743 if ((*mem_obj) == NULL) 744 return -ENOMEM; 745 746 pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size); 747 748 start_search = 0; 749 750 mutex_lock(&kfd->gtt_sa_lock); 751 752 kfd_gtt_restart_search: 753 /* Find the first chunk that is free */ 754 found = find_next_zero_bit(kfd->gtt_sa_bitmap, 755 kfd->gtt_sa_num_of_chunks, 756 start_search); 757 758 pr_debug("Found = %d\n", found); 759 760 /* If there wasn't any free chunk, bail out */ 761 if (found == kfd->gtt_sa_num_of_chunks) 762 goto kfd_gtt_no_free_chunk; 763 764 /* Update fields of mem_obj */ 765 (*mem_obj)->range_start = found; 766 (*mem_obj)->range_end = found; 767 (*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr( 768 kfd->gtt_start_gpu_addr, 769 found, 770 kfd->gtt_sa_chunk_size); 771 (*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr( 772 kfd->gtt_start_cpu_ptr, 773 found, 774 kfd->gtt_sa_chunk_size); 775 776 pr_debug("gpu_addr = %p, cpu_addr = %p\n", 777 (uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr); 778 779 /* If we need only one chunk, mark it as allocated and get out */ 780 if (size <= kfd->gtt_sa_chunk_size) { 781 pr_debug("Single bit\n"); 782 set_bit(found, kfd->gtt_sa_bitmap); 783 goto kfd_gtt_out; 784 } 785 786 /* Otherwise, try to see if we have enough contiguous chunks */ 787 cur_size = size - kfd->gtt_sa_chunk_size; 788 do { 789 (*mem_obj)->range_end = 790 find_next_zero_bit(kfd->gtt_sa_bitmap, 791 kfd->gtt_sa_num_of_chunks, ++found); 792 /* 793 * If next free chunk is not contiguous than we need to 794 * restart our search from the last free chunk we found (which 795 * wasn't contiguous to the previous ones 796 */ 797 if ((*mem_obj)->range_end != found) { 798 start_search = found; 799 goto kfd_gtt_restart_search; 800 } 801 802 /* 803 * If we reached end of buffer, bail out with error 804 */ 805 if (found == kfd->gtt_sa_num_of_chunks) 806 goto kfd_gtt_no_free_chunk; 807 808 /* Check if we don't need another chunk */ 809 if (cur_size <= kfd->gtt_sa_chunk_size) 810 cur_size = 0; 811 else 812 cur_size -= kfd->gtt_sa_chunk_size; 813 814 } while (cur_size > 0); 815 816 pr_debug("range_start = %d, range_end = %d\n", 817 (*mem_obj)->range_start, (*mem_obj)->range_end); 818 819 /* Mark the chunks as allocated */ 820 for (found = (*mem_obj)->range_start; 821 found <= (*mem_obj)->range_end; 822 found++) 823 set_bit(found, kfd->gtt_sa_bitmap); 824 825 kfd_gtt_out: 826 mutex_unlock(&kfd->gtt_sa_lock); 827 return 0; 828 829 kfd_gtt_no_free_chunk: 830 pr_debug("Allocation failed with mem_obj = %p\n", mem_obj); 831 mutex_unlock(&kfd->gtt_sa_lock); 832 kfree(mem_obj); 833 return -ENOMEM; 834 } 835 836 int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj) 837 { 838 unsigned int bit; 839 840 /* Act like kfree when trying to free a NULL object */ 841 if (!mem_obj) 842 return 0; 843 844 pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n", 845 mem_obj, mem_obj->range_start, mem_obj->range_end); 846 847 mutex_lock(&kfd->gtt_sa_lock); 848 849 /* Mark the chunks as free */ 850 for (bit = mem_obj->range_start; 851 bit <= mem_obj->range_end; 852 bit++) 853 clear_bit(bit, kfd->gtt_sa_bitmap); 854 855 mutex_unlock(&kfd->gtt_sa_lock); 856 857 kfree(mem_obj); 858 return 0; 859 } 860