1d87f36a0SRajneesh Bhardwaj // SPDX-License-Identifier: GPL-2.0 OR MIT 24a488a7aSOded Gabbay /* 3d87f36a0SRajneesh Bhardwaj * Copyright 2014-2022 Advanced Micro Devices, Inc. 44a488a7aSOded Gabbay * 54a488a7aSOded Gabbay * Permission is hereby granted, free of charge, to any person obtaining a 64a488a7aSOded Gabbay * copy of this software and associated documentation files (the "Software"), 74a488a7aSOded Gabbay * to deal in the Software without restriction, including without limitation 84a488a7aSOded Gabbay * the rights to use, copy, modify, merge, publish, distribute, sublicense, 94a488a7aSOded Gabbay * and/or sell copies of the Software, and to permit persons to whom the 104a488a7aSOded Gabbay * Software is furnished to do so, subject to the following conditions: 114a488a7aSOded Gabbay * 124a488a7aSOded Gabbay * The above copyright notice and this permission notice shall be included in 134a488a7aSOded Gabbay * all copies or substantial portions of the Software. 144a488a7aSOded Gabbay * 154a488a7aSOded Gabbay * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 164a488a7aSOded Gabbay * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 174a488a7aSOded Gabbay * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 184a488a7aSOded Gabbay * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 194a488a7aSOded Gabbay * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 204a488a7aSOded Gabbay * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 214a488a7aSOded Gabbay * OTHER DEALINGS IN THE SOFTWARE. 224a488a7aSOded Gabbay */ 234a488a7aSOded Gabbay 244a488a7aSOded Gabbay #include <linux/bsearch.h> 254a488a7aSOded Gabbay #include <linux/pci.h> 264a488a7aSOded Gabbay #include <linux/slab.h> 274a488a7aSOded Gabbay #include "kfd_priv.h" 2864c7f8cfSBen Goz #include "kfd_device_queue_manager.h" 29507968ddSFelix Kuehling #include "kfd_pm4_headers_vi.h" 30fd6a440eSJonathan Kim #include "kfd_pm4_headers_aldebaran.h" 310db54b24SYong Zhao #include "cwsr_trap_handler.h" 3264d1c3a4SFelix Kuehling #include "kfd_iommu.h" 335b87245fSAmber Lin #include "amdgpu_amdkfd.h" 342c2b0d88SMukul Joshi #include "kfd_smi_events.h" 35814ab993SPhilip Yang #include "kfd_migrate.h" 365b983db8SAlex Deucher #include "amdgpu.h" 374a488a7aSOded Gabbay 3819f6d2a6SOded Gabbay #define MQD_SIZE_ALIGNED 768 39e42051d2SShaoyun Liu 40e42051d2SShaoyun Liu /* 41e42051d2SShaoyun Liu * kfd_locked is used to lock the kfd driver during suspend or reset 42e42051d2SShaoyun Liu * once locked, kfd driver will stop any further GPU execution. 43e42051d2SShaoyun Liu * create process (open) will return -EAGAIN. 44e42051d2SShaoyun Liu */ 45e42051d2SShaoyun Liu static atomic_t kfd_locked = ATOMIC_INIT(0); 4619f6d2a6SOded Gabbay 47a3e520a2SAlex Deucher #ifdef CONFIG_DRM_AMDGPU_CIK 48e392c887SYong Zhao extern const struct kfd2kgd_calls gfx_v7_kfd2kgd; 49a3e520a2SAlex Deucher #endif 50e392c887SYong Zhao extern const struct kfd2kgd_calls gfx_v8_kfd2kgd; 51e392c887SYong Zhao extern const struct kfd2kgd_calls gfx_v9_kfd2kgd; 52e392c887SYong Zhao extern const struct kfd2kgd_calls arcturus_kfd2kgd; 535073506cSJonathan Kim extern const struct kfd2kgd_calls aldebaran_kfd2kgd; 54e392c887SYong Zhao extern const struct kfd2kgd_calls gfx_v10_kfd2kgd; 553a2f0c81SYong Zhao extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd; 56e392c887SYong Zhao 576e81090bSOded Gabbay static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, 586e81090bSOded Gabbay unsigned int chunk_size); 596e81090bSOded Gabbay static void kfd_gtt_sa_fini(struct kfd_dev *kfd); 606e81090bSOded Gabbay 61b8935a7cSYong Zhao static int kfd_resume(struct kfd_dev *kfd); 62b8935a7cSYong Zhao 63f89c6bf7SGuchun Chen static void kfd_device_info_set_sdma_queue_num(struct kfd_dev *kfd) 64f89c6bf7SGuchun Chen { 65f89c6bf7SGuchun Chen uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0]; 66f89c6bf7SGuchun Chen 67f89c6bf7SGuchun Chen switch (sdma_version) { 68f89c6bf7SGuchun Chen case IP_VERSION(4, 0, 0):/* VEGA10 */ 69f89c6bf7SGuchun Chen case IP_VERSION(4, 0, 1):/* VEGA12 */ 70f89c6bf7SGuchun Chen case IP_VERSION(4, 1, 0):/* RAVEN */ 71f89c6bf7SGuchun Chen case IP_VERSION(4, 1, 1):/* RAVEN */ 725eb877b2SKent Russell case IP_VERSION(4, 1, 2):/* RENOIR */ 73f89c6bf7SGuchun Chen case IP_VERSION(5, 2, 1):/* VANGOGH */ 74f89c6bf7SGuchun Chen case IP_VERSION(5, 2, 3):/* YELLOW_CARP */ 75f89c6bf7SGuchun Chen kfd->device_info.num_sdma_queues_per_engine = 2; 76f89c6bf7SGuchun Chen break; 77f89c6bf7SGuchun Chen case IP_VERSION(4, 2, 0):/* VEGA20 */ 785eb877b2SKent Russell case IP_VERSION(4, 2, 2):/* ARCTURUS */ 79f89c6bf7SGuchun Chen case IP_VERSION(4, 4, 0):/* ALDEBARAN */ 80f89c6bf7SGuchun Chen case IP_VERSION(5, 0, 0):/* NAVI10 */ 81f89c6bf7SGuchun Chen case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */ 82f89c6bf7SGuchun Chen case IP_VERSION(5, 0, 2):/* NAVI14 */ 83f89c6bf7SGuchun Chen case IP_VERSION(5, 0, 5):/* NAVI12 */ 84f89c6bf7SGuchun Chen case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */ 855eb877b2SKent Russell case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */ 86f89c6bf7SGuchun Chen case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */ 87f89c6bf7SGuchun Chen case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */ 88f89c6bf7SGuchun Chen kfd->device_info.num_sdma_queues_per_engine = 8; 89f89c6bf7SGuchun Chen break; 90f89c6bf7SGuchun Chen default: 91f89c6bf7SGuchun Chen dev_warn(kfd_device, 9220c5e425SGraham Sider "Default sdma queue per engine(8) is set due to mismatch of sdma ip block(SDMA_HWIP:0x%x).\n", 93f89c6bf7SGuchun Chen sdma_version); 94f89c6bf7SGuchun Chen kfd->device_info.num_sdma_queues_per_engine = 8; 95f89c6bf7SGuchun Chen } 96f89c6bf7SGuchun Chen } 97f89c6bf7SGuchun Chen 98f89c6bf7SGuchun Chen static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd) 99f89c6bf7SGuchun Chen { 100f89c6bf7SGuchun Chen uint32_t gc_version = KFD_GC_VERSION(kfd); 101f89c6bf7SGuchun Chen 102f89c6bf7SGuchun Chen switch (gc_version) { 103f89c6bf7SGuchun Chen case IP_VERSION(9, 0, 1): /* VEGA10 */ 104f89c6bf7SGuchun Chen case IP_VERSION(9, 1, 0): /* RAVEN */ 105f89c6bf7SGuchun Chen case IP_VERSION(9, 2, 1): /* VEGA12 */ 106f89c6bf7SGuchun Chen case IP_VERSION(9, 2, 2): /* RAVEN */ 107f89c6bf7SGuchun Chen case IP_VERSION(9, 3, 0): /* RENOIR */ 108f89c6bf7SGuchun Chen case IP_VERSION(9, 4, 0): /* VEGA20 */ 109f89c6bf7SGuchun Chen case IP_VERSION(9, 4, 1): /* ARCTURUS */ 110f89c6bf7SGuchun Chen case IP_VERSION(9, 4, 2): /* ALDEBARAN */ 111f89c6bf7SGuchun Chen case IP_VERSION(10, 3, 1): /* VANGOGH */ 112f89c6bf7SGuchun Chen case IP_VERSION(10, 3, 3): /* YELLOW_CARP */ 113f89c6bf7SGuchun Chen case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */ 114f9ed188dSLang Yu case IP_VERSION(10, 1, 4): 115f89c6bf7SGuchun Chen case IP_VERSION(10, 1, 10): /* NAVI10 */ 116f89c6bf7SGuchun Chen case IP_VERSION(10, 1, 2): /* NAVI12 */ 117f89c6bf7SGuchun Chen case IP_VERSION(10, 1, 1): /* NAVI14 */ 118f89c6bf7SGuchun Chen case IP_VERSION(10, 3, 0): /* SIENNA_CICHLID */ 119f89c6bf7SGuchun Chen case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */ 120f89c6bf7SGuchun Chen case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */ 121f89c6bf7SGuchun Chen case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */ 122f89c6bf7SGuchun Chen kfd->device_info.event_interrupt_class = &event_interrupt_class_v9; 123f89c6bf7SGuchun Chen break; 124f89c6bf7SGuchun Chen default: 125f89c6bf7SGuchun Chen dev_warn(kfd_device, "v9 event interrupt handler is set due to " 126f89c6bf7SGuchun Chen "mismatch of gc ip block(GC_HWIP:0x%x).\n", gc_version); 127f89c6bf7SGuchun Chen kfd->device_info.event_interrupt_class = &event_interrupt_class_v9; 128f89c6bf7SGuchun Chen } 129f89c6bf7SGuchun Chen } 130f89c6bf7SGuchun Chen 131f0dc99a6SGraham Sider static void kfd_device_info_init(struct kfd_dev *kfd, 132f0dc99a6SGraham Sider bool vf, uint32_t gfx_target_version) 133f0dc99a6SGraham Sider { 134f0dc99a6SGraham Sider uint32_t gc_version = KFD_GC_VERSION(kfd); 135f0dc99a6SGraham Sider uint32_t asic_type = kfd->adev->asic_type; 136f0dc99a6SGraham Sider 137f0dc99a6SGraham Sider kfd->device_info.max_pasid_bits = 16; 138f0dc99a6SGraham Sider kfd->device_info.max_no_of_hqd = 24; 139f0dc99a6SGraham Sider kfd->device_info.num_of_watch_points = 4; 140f0dc99a6SGraham Sider kfd->device_info.mqd_size_aligned = MQD_SIZE_ALIGNED; 141f0dc99a6SGraham Sider kfd->device_info.gfx_target_version = gfx_target_version; 142f0dc99a6SGraham Sider 143f0dc99a6SGraham Sider if (KFD_IS_SOC15(kfd)) { 144f0dc99a6SGraham Sider kfd->device_info.doorbell_size = 8; 145f0dc99a6SGraham Sider kfd->device_info.ih_ring_entry_size = 8 * sizeof(uint32_t); 146f0dc99a6SGraham Sider kfd->device_info.supports_cwsr = true; 147f0dc99a6SGraham Sider 148f89c6bf7SGuchun Chen kfd_device_info_set_sdma_queue_num(kfd); 149f89c6bf7SGuchun Chen 150f89c6bf7SGuchun Chen kfd_device_info_set_event_interrupt_class(kfd); 151f0dc99a6SGraham Sider 152f0dc99a6SGraham Sider /* Raven */ 153f0dc99a6SGraham Sider if (gc_version == IP_VERSION(9, 1, 0) || 154f0dc99a6SGraham Sider gc_version == IP_VERSION(9, 2, 2)) 155f0dc99a6SGraham Sider kfd->device_info.needs_iommu_device = true; 156f0dc99a6SGraham Sider 157f0dc99a6SGraham Sider if (gc_version < IP_VERSION(11, 0, 0)) { 158f0dc99a6SGraham Sider /* Navi2x+, Navi1x+ */ 159f0dc99a6SGraham Sider if (gc_version >= IP_VERSION(10, 3, 0)) 160f0dc99a6SGraham Sider kfd->device_info.no_atomic_fw_version = 92; 16127cc310fSchen gong else if (gc_version >= IP_VERSION(10, 1, 1)) 16227cc310fSchen gong kfd->device_info.no_atomic_fw_version = 145; 163f0dc99a6SGraham Sider 164f0dc99a6SGraham Sider /* Navi1x+ */ 165f0dc99a6SGraham Sider if (gc_version >= IP_VERSION(10, 1, 1)) 166f0dc99a6SGraham Sider kfd->device_info.needs_pci_atomics = true; 167f0dc99a6SGraham Sider } 168f0dc99a6SGraham Sider } else { 169f0dc99a6SGraham Sider kfd->device_info.doorbell_size = 4; 170f0dc99a6SGraham Sider kfd->device_info.ih_ring_entry_size = 4 * sizeof(uint32_t); 171f0dc99a6SGraham Sider kfd->device_info.event_interrupt_class = &event_interrupt_class_cik; 172f0dc99a6SGraham Sider kfd->device_info.num_sdma_queues_per_engine = 2; 173f0dc99a6SGraham Sider 174f0dc99a6SGraham Sider if (asic_type != CHIP_KAVERI && 175f0dc99a6SGraham Sider asic_type != CHIP_HAWAII && 176f0dc99a6SGraham Sider asic_type != CHIP_TONGA) 177f0dc99a6SGraham Sider kfd->device_info.supports_cwsr = true; 178f0dc99a6SGraham Sider 179f0dc99a6SGraham Sider if (asic_type == CHIP_KAVERI || 180f0dc99a6SGraham Sider asic_type == CHIP_CARRIZO) 181f0dc99a6SGraham Sider kfd->device_info.needs_iommu_device = true; 182f0dc99a6SGraham Sider 183f0dc99a6SGraham Sider if (asic_type != CHIP_HAWAII && !vf) 184f0dc99a6SGraham Sider kfd->device_info.needs_pci_atomics = true; 185f0dc99a6SGraham Sider } 186f0dc99a6SGraham Sider } 187f0dc99a6SGraham Sider 188b5d1d755SGraham Sider struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) 1894a488a7aSOded Gabbay { 190f0dc99a6SGraham Sider struct kfd_dev *kfd = NULL; 191f0dc99a6SGraham Sider const struct kfd2kgd_calls *f2g = NULL; 1925b983db8SAlex Deucher struct pci_dev *pdev = adev->pdev; 193f0dc99a6SGraham Sider uint32_t gfx_target_version = 0; 194050091abSYong Zhao 195c868d584SAlex Deucher switch (adev->asic_type) { 196c868d584SAlex Deucher #ifdef KFD_SUPPORT_IOMMU_V2 197c868d584SAlex Deucher #ifdef CONFIG_DRM_AMDGPU_CIK 198c868d584SAlex Deucher case CHIP_KAVERI: 199f0dc99a6SGraham Sider gfx_target_version = 70000; 200f0dc99a6SGraham Sider if (!vf) 201c868d584SAlex Deucher f2g = &gfx_v7_kfd2kgd; 202c868d584SAlex Deucher break; 203c868d584SAlex Deucher #endif 204c868d584SAlex Deucher case CHIP_CARRIZO: 205f0dc99a6SGraham Sider gfx_target_version = 80001; 206f0dc99a6SGraham Sider if (!vf) 207c868d584SAlex Deucher f2g = &gfx_v8_kfd2kgd; 208c868d584SAlex Deucher break; 209c868d584SAlex Deucher #endif 210c868d584SAlex Deucher #ifdef CONFIG_DRM_AMDGPU_CIK 211c868d584SAlex Deucher case CHIP_HAWAII: 212f0dc99a6SGraham Sider gfx_target_version = 70001; 2130f7ef0b9SFelix Kuehling if (!amdgpu_exp_hw_support) 2140f7ef0b9SFelix Kuehling pr_info( 2150f7ef0b9SFelix Kuehling "KFD support on Hawaii is experimental. See modparam exp_hw_support\n" 2160f7ef0b9SFelix Kuehling ); 2170f7ef0b9SFelix Kuehling else if (!vf) 218c868d584SAlex Deucher f2g = &gfx_v7_kfd2kgd; 219c868d584SAlex Deucher break; 220c868d584SAlex Deucher #endif 221c868d584SAlex Deucher case CHIP_TONGA: 222f0dc99a6SGraham Sider gfx_target_version = 80002; 223f0dc99a6SGraham Sider if (!vf) 224c868d584SAlex Deucher f2g = &gfx_v8_kfd2kgd; 225c868d584SAlex Deucher break; 226c868d584SAlex Deucher case CHIP_FIJI: 227f0dc99a6SGraham Sider gfx_target_version = 80003; 228c868d584SAlex Deucher f2g = &gfx_v8_kfd2kgd; 229c868d584SAlex Deucher break; 230c868d584SAlex Deucher case CHIP_POLARIS10: 231f0dc99a6SGraham Sider gfx_target_version = 80003; 232c868d584SAlex Deucher f2g = &gfx_v8_kfd2kgd; 233c868d584SAlex Deucher break; 234c868d584SAlex Deucher case CHIP_POLARIS11: 235f0dc99a6SGraham Sider gfx_target_version = 80003; 236f0dc99a6SGraham Sider if (!vf) 237c868d584SAlex Deucher f2g = &gfx_v8_kfd2kgd; 238c868d584SAlex Deucher break; 239c868d584SAlex Deucher case CHIP_POLARIS12: 240f0dc99a6SGraham Sider gfx_target_version = 80003; 241f0dc99a6SGraham Sider if (!vf) 242c868d584SAlex Deucher f2g = &gfx_v8_kfd2kgd; 243c868d584SAlex Deucher break; 244c868d584SAlex Deucher case CHIP_VEGAM: 245f0dc99a6SGraham Sider gfx_target_version = 80003; 246f0dc99a6SGraham Sider if (!vf) 247c868d584SAlex Deucher f2g = &gfx_v8_kfd2kgd; 248c868d584SAlex Deucher break; 249c868d584SAlex Deucher default: 250c868d584SAlex Deucher switch (adev->ip_versions[GC_HWIP][0]) { 2512c1f19b3SGraham Sider /* Vega 10 */ 252c868d584SAlex Deucher case IP_VERSION(9, 0, 1): 253f0dc99a6SGraham Sider gfx_target_version = 90000; 254c868d584SAlex Deucher f2g = &gfx_v9_kfd2kgd; 255c868d584SAlex Deucher break; 256c868d584SAlex Deucher #ifdef KFD_SUPPORT_IOMMU_V2 2572c1f19b3SGraham Sider /* Raven */ 258c868d584SAlex Deucher case IP_VERSION(9, 1, 0): 259c868d584SAlex Deucher case IP_VERSION(9, 2, 2): 260f0dc99a6SGraham Sider gfx_target_version = 90002; 261f0dc99a6SGraham Sider if (!vf) 262c868d584SAlex Deucher f2g = &gfx_v9_kfd2kgd; 263c868d584SAlex Deucher break; 264c868d584SAlex Deucher #endif 2652c1f19b3SGraham Sider /* Vega12 */ 266c868d584SAlex Deucher case IP_VERSION(9, 2, 1): 267f0dc99a6SGraham Sider gfx_target_version = 90004; 268f0dc99a6SGraham Sider if (!vf) 269c868d584SAlex Deucher f2g = &gfx_v9_kfd2kgd; 270c868d584SAlex Deucher break; 2712c1f19b3SGraham Sider /* Renoir */ 272c868d584SAlex Deucher case IP_VERSION(9, 3, 0): 273f0dc99a6SGraham Sider gfx_target_version = 90012; 274f0dc99a6SGraham Sider if (!vf) 275c868d584SAlex Deucher f2g = &gfx_v9_kfd2kgd; 276c868d584SAlex Deucher break; 2772c1f19b3SGraham Sider /* Vega20 */ 278c868d584SAlex Deucher case IP_VERSION(9, 4, 0): 279f0dc99a6SGraham Sider gfx_target_version = 90006; 280f0dc99a6SGraham Sider if (!vf) 281c868d584SAlex Deucher f2g = &gfx_v9_kfd2kgd; 282c868d584SAlex Deucher break; 2832c1f19b3SGraham Sider /* Arcturus */ 284c868d584SAlex Deucher case IP_VERSION(9, 4, 1): 285f0dc99a6SGraham Sider gfx_target_version = 90008; 286c868d584SAlex Deucher f2g = &arcturus_kfd2kgd; 287c868d584SAlex Deucher break; 2882c1f19b3SGraham Sider /* Aldebaran */ 289c868d584SAlex Deucher case IP_VERSION(9, 4, 2): 290f0dc99a6SGraham Sider gfx_target_version = 90010; 291c868d584SAlex Deucher f2g = &aldebaran_kfd2kgd; 292c868d584SAlex Deucher break; 2932c1f19b3SGraham Sider /* Navi10 */ 294c868d584SAlex Deucher case IP_VERSION(10, 1, 10): 295f0dc99a6SGraham Sider gfx_target_version = 100100; 296f0dc99a6SGraham Sider if (!vf) 297c868d584SAlex Deucher f2g = &gfx_v10_kfd2kgd; 298c868d584SAlex Deucher break; 2992c1f19b3SGraham Sider /* Navi12 */ 300c868d584SAlex Deucher case IP_VERSION(10, 1, 2): 301f0dc99a6SGraham Sider gfx_target_version = 100101; 302c868d584SAlex Deucher f2g = &gfx_v10_kfd2kgd; 303c868d584SAlex Deucher break; 3042c1f19b3SGraham Sider /* Navi14 */ 305c868d584SAlex Deucher case IP_VERSION(10, 1, 1): 306f0dc99a6SGraham Sider gfx_target_version = 100102; 307f0dc99a6SGraham Sider if (!vf) 308c868d584SAlex Deucher f2g = &gfx_v10_kfd2kgd; 309c868d584SAlex Deucher break; 3102c1f19b3SGraham Sider /* Cyan Skillfish */ 311c868d584SAlex Deucher case IP_VERSION(10, 1, 3): 312f9ed188dSLang Yu case IP_VERSION(10, 1, 4): 313f0dc99a6SGraham Sider gfx_target_version = 100103; 314f0dc99a6SGraham Sider if (!vf) 315c868d584SAlex Deucher f2g = &gfx_v10_kfd2kgd; 316c868d584SAlex Deucher break; 3172c1f19b3SGraham Sider /* Sienna Cichlid */ 318c868d584SAlex Deucher case IP_VERSION(10, 3, 0): 319f0dc99a6SGraham Sider gfx_target_version = 100300; 320c868d584SAlex Deucher f2g = &gfx_v10_3_kfd2kgd; 321c868d584SAlex Deucher break; 3222c1f19b3SGraham Sider /* Navy Flounder */ 323c868d584SAlex Deucher case IP_VERSION(10, 3, 2): 324f0dc99a6SGraham Sider gfx_target_version = 100301; 325c868d584SAlex Deucher f2g = &gfx_v10_3_kfd2kgd; 326c868d584SAlex Deucher break; 3272c1f19b3SGraham Sider /* Van Gogh */ 328c868d584SAlex Deucher case IP_VERSION(10, 3, 1): 329f0dc99a6SGraham Sider gfx_target_version = 100303; 330f0dc99a6SGraham Sider if (!vf) 331c868d584SAlex Deucher f2g = &gfx_v10_3_kfd2kgd; 332c868d584SAlex Deucher break; 3332c1f19b3SGraham Sider /* Dimgrey Cavefish */ 334c868d584SAlex Deucher case IP_VERSION(10, 3, 4): 335f0dc99a6SGraham Sider gfx_target_version = 100302; 336c868d584SAlex Deucher f2g = &gfx_v10_3_kfd2kgd; 337c868d584SAlex Deucher break; 3382c1f19b3SGraham Sider /* Beige Goby */ 339c868d584SAlex Deucher case IP_VERSION(10, 3, 5): 340f0dc99a6SGraham Sider gfx_target_version = 100304; 341c868d584SAlex Deucher f2g = &gfx_v10_3_kfd2kgd; 342c868d584SAlex Deucher break; 3432c1f19b3SGraham Sider /* Yellow Carp */ 344c868d584SAlex Deucher case IP_VERSION(10, 3, 3): 345f0dc99a6SGraham Sider gfx_target_version = 100305; 346f0dc99a6SGraham Sider if (!vf) 347c868d584SAlex Deucher f2g = &gfx_v10_3_kfd2kgd; 348c868d584SAlex Deucher break; 349c868d584SAlex Deucher default: 350f0dc99a6SGraham Sider break; 351050091abSYong Zhao } 352c868d584SAlex Deucher break; 353c868d584SAlex Deucher } 3544a488a7aSOded Gabbay 355f0dc99a6SGraham Sider if (!f2g) { 356e4804a39SGraham Sider if (adev->ip_versions[GC_HWIP][0]) 357e4804a39SGraham Sider dev_err(kfd_device, "GC IP %06x %s not supported in kfd\n", 358e4804a39SGraham Sider adev->ip_versions[GC_HWIP][0], vf ? "VF" : ""); 359e4804a39SGraham Sider else 360050091abSYong Zhao dev_err(kfd_device, "%s %s not supported in kfd\n", 361c868d584SAlex Deucher amdgpu_asic_name[adev->asic_type], vf ? "VF" : ""); 3624a488a7aSOded Gabbay return NULL; 3634ebc7182SYong Zhao } 3644a488a7aSOded Gabbay 365d35f00d8SEric Huang kfd = kzalloc(sizeof(*kfd), GFP_KERNEL); 366d35f00d8SEric Huang if (!kfd) 367d35f00d8SEric Huang return NULL; 368d35f00d8SEric Huang 369c6c57446SGraham Sider kfd->adev = adev; 370f0dc99a6SGraham Sider kfd_device_info_init(kfd, vf, gfx_target_version); 3714a488a7aSOded Gabbay kfd->pdev = pdev; 37219f6d2a6SOded Gabbay kfd->init_complete = false; 373cea405b1SXihan Zhang kfd->kfd2kgd = f2g; 37443d8107fSHarish Kasiviswanathan atomic_set(&kfd->compute_profile, 0); 375cea405b1SXihan Zhang 376cea405b1SXihan Zhang mutex_init(&kfd->doorbell_mutex); 377cea405b1SXihan Zhang memset(&kfd->doorbell_available_index, 0, 378cea405b1SXihan Zhang sizeof(kfd->doorbell_available_index)); 3794a488a7aSOded Gabbay 3809b54d201SEric Huang atomic_set(&kfd->sram_ecc_flag, 0); 3819b54d201SEric Huang 38259d7115dSMukul Joshi ida_init(&kfd->doorbell_ida); 38359d7115dSMukul Joshi 3844a488a7aSOded Gabbay return kfd; 3854a488a7aSOded Gabbay } 3864a488a7aSOded Gabbay 387373d7080SFelix Kuehling static void kfd_cwsr_init(struct kfd_dev *kfd) 388373d7080SFelix Kuehling { 389f0dc99a6SGraham Sider if (cwsr_enable && kfd->device_info.supports_cwsr) { 390046e674bSGraham Sider if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) { 391373d7080SFelix Kuehling BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE); 392373d7080SFelix Kuehling kfd->cwsr_isa = cwsr_trap_gfx8_hex; 393373d7080SFelix Kuehling kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex); 394046e674bSGraham Sider } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) { 3953baa24f0SOak Zeng BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE); 3963baa24f0SOak Zeng kfd->cwsr_isa = cwsr_trap_arcturus_hex; 3973baa24f0SOak Zeng kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex); 398046e674bSGraham Sider } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) { 3990ef6845cSJay Cornwall BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) > PAGE_SIZE); 4000ef6845cSJay Cornwall kfd->cwsr_isa = cwsr_trap_aldebaran_hex; 4010ef6845cSJay Cornwall kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex); 402046e674bSGraham Sider } else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) { 4033e76c239SFelix Kuehling BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE); 4043e76c239SFelix Kuehling kfd->cwsr_isa = cwsr_trap_gfx9_hex; 4053e76c239SFelix Kuehling kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex); 406046e674bSGraham Sider } else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) { 40780b6cfedSJay Cornwall BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE); 40880b6cfedSJay Cornwall kfd->cwsr_isa = cwsr_trap_nv1x_hex; 40980b6cfedSJay Cornwall kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex); 41014328aa5SPhilip Cox } else { 41114328aa5SPhilip Cox BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex) > PAGE_SIZE); 41214328aa5SPhilip Cox kfd->cwsr_isa = cwsr_trap_gfx10_hex; 41314328aa5SPhilip Cox kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex); 4143e76c239SFelix Kuehling } 4153e76c239SFelix Kuehling 416373d7080SFelix Kuehling kfd->cwsr_enabled = true; 417373d7080SFelix Kuehling } 418373d7080SFelix Kuehling } 419373d7080SFelix Kuehling 42029633d0eSJoseph Greathouse static int kfd_gws_init(struct kfd_dev *kfd) 42129633d0eSJoseph Greathouse { 42229633d0eSJoseph Greathouse int ret = 0; 42329633d0eSJoseph Greathouse 42429633d0eSJoseph Greathouse if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) 42529633d0eSJoseph Greathouse return 0; 42629633d0eSJoseph Greathouse 427046e674bSGraham Sider if (hws_gws_support || (KFD_IS_SOC15(kfd) && 428046e674bSGraham Sider ((KFD_GC_VERSION(kfd) == IP_VERSION(9, 0, 1) 429046e674bSGraham Sider && kfd->mec2_fw_version >= 0x81b3) || 430046e674bSGraham Sider (KFD_GC_VERSION(kfd) <= IP_VERSION(9, 4, 0) 431046e674bSGraham Sider && kfd->mec2_fw_version >= 0x1b3) || 432046e674bSGraham Sider (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1) 433046e674bSGraham Sider && kfd->mec2_fw_version >= 0x30) || 434046e674bSGraham Sider (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) 435addaac0cSJonathan Kim && kfd->mec2_fw_version >= 0x28)))) 4366bfc7c7eSGraham Sider ret = amdgpu_amdkfd_alloc_gws(kfd->adev, 43702274fc0SGraham Sider kfd->adev->gds.gws_size, &kfd->gws); 43829633d0eSJoseph Greathouse 43929633d0eSJoseph Greathouse return ret; 44029633d0eSJoseph Greathouse } 44129633d0eSJoseph Greathouse 4422243f493SRajneesh Bhardwaj static void kfd_smi_init(struct kfd_dev *dev) 4432243f493SRajneesh Bhardwaj { 444938a0650SAmber Lin INIT_LIST_HEAD(&dev->smi_clients); 445938a0650SAmber Lin spin_lock_init(&dev->smi_lock); 446938a0650SAmber Lin } 447938a0650SAmber Lin 4484a488a7aSOded Gabbay bool kgd2kfd_device_init(struct kfd_dev *kfd, 4493a0c3423SHarish Kasiviswanathan struct drm_device *ddev, 4504a488a7aSOded Gabbay const struct kgd2kfd_shared_resources *gpu_resources) 4514a488a7aSOded Gabbay { 452fd6a440eSJonathan Kim unsigned int size, map_process_packet_size; 45319f6d2a6SOded Gabbay 4543a0c3423SHarish Kasiviswanathan kfd->ddev = ddev; 455574c4183SGraham Sider kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, 4565ade6c9cSFelix Kuehling KGD_ENGINE_MEC1); 457574c4183SGraham Sider kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, 45829633d0eSJoseph Greathouse KGD_ENGINE_MEC2); 459574c4183SGraham Sider kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, 4605ade6c9cSFelix Kuehling KGD_ENGINE_SDMA1); 4614a488a7aSOded Gabbay kfd->shared_resources = *gpu_resources; 4624a488a7aSOded Gabbay 46344008d7aSYong Zhao kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1; 46444008d7aSYong Zhao kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1; 46544008d7aSYong Zhao kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd 46644008d7aSYong Zhao - kfd->vm_info.first_vmid_kfd + 1; 46744008d7aSYong Zhao 468e312af6cSFelix Kuehling /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps. 469e312af6cSFelix Kuehling * 32 and 64-bit requests are possible and must be 470e312af6cSFelix Kuehling * supported. 471e312af6cSFelix Kuehling */ 4726bfc7c7eSGraham Sider kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev); 473e312af6cSFelix Kuehling if (!kfd->pci_atomic_requested && 474f0dc99a6SGraham Sider kfd->device_info.needs_pci_atomics && 475f0dc99a6SGraham Sider (!kfd->device_info.no_atomic_fw_version || 476f0dc99a6SGraham Sider kfd->mec_fw_version < kfd->device_info.no_atomic_fw_version)) { 477e312af6cSFelix Kuehling dev_info(kfd_device, 478e312af6cSFelix Kuehling "skipped device %x:%x, PCI rejects atomics %d<%d\n", 479e312af6cSFelix Kuehling kfd->pdev->vendor, kfd->pdev->device, 480e312af6cSFelix Kuehling kfd->mec_fw_version, 481f0dc99a6SGraham Sider kfd->device_info.no_atomic_fw_version); 482e312af6cSFelix Kuehling return false; 483e312af6cSFelix Kuehling } 484e312af6cSFelix Kuehling 485a99c6d4fSFelix Kuehling /* Verify module parameters regarding mapped process number*/ 486b7dfbd2eSTushar Patel if (hws_max_conc_proc >= 0) 487b7dfbd2eSTushar Patel kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd); 488b7dfbd2eSTushar Patel else 489a99c6d4fSFelix Kuehling kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd; 490a99c6d4fSFelix Kuehling 49119f6d2a6SOded Gabbay /* calculate max size of mqds needed for queues */ 492b8cbab04SOded Gabbay size = max_num_of_queues_per_device * 493f0dc99a6SGraham Sider kfd->device_info.mqd_size_aligned; 49419f6d2a6SOded Gabbay 495e18e794eSOded Gabbay /* 496e18e794eSOded Gabbay * calculate max size of runlist packet. 497e18e794eSOded Gabbay * There can be only 2 packets at once 498e18e794eSOded Gabbay */ 499046e674bSGraham Sider map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ? 500fd6a440eSJonathan Kim sizeof(struct pm4_mes_map_process_aldebaran) : 501fd6a440eSJonathan Kim sizeof(struct pm4_mes_map_process); 502fd6a440eSJonathan Kim size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size + 503507968ddSFelix Kuehling max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues) 504507968ddSFelix Kuehling + sizeof(struct pm4_mes_runlist)) * 2; 505e18e794eSOded Gabbay 506e18e794eSOded Gabbay /* Add size of HIQ & DIQ */ 507e18e794eSOded Gabbay size += KFD_KERNEL_QUEUE_SIZE * 2; 508e18e794eSOded Gabbay 509e18e794eSOded Gabbay /* add another 512KB for all other allocations on gart (HPD, fences) */ 51019f6d2a6SOded Gabbay size += 512 * 1024; 51119f6d2a6SOded Gabbay 5127cd52c91SAmber Lin if (amdgpu_amdkfd_alloc_gtt_mem( 5136bfc7c7eSGraham Sider kfd->adev, size, &kfd->gtt_mem, 51415426dbbSYong Zhao &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr, 51515426dbbSYong Zhao false)) { 51679775b62SKent Russell dev_err(kfd_device, "Could not allocate %d bytes\n", size); 517e09d4fc8SOak Zeng goto alloc_gtt_mem_failure; 51819f6d2a6SOded Gabbay } 51919f6d2a6SOded Gabbay 52079775b62SKent Russell dev_info(kfd_device, "Allocated %d bytes on gart\n", size); 521e18e794eSOded Gabbay 52273a1da0bSOded Gabbay /* Initialize GTT sa with 512 byte chunk size */ 52373a1da0bSOded Gabbay if (kfd_gtt_sa_init(kfd, size, 512) != 0) { 52479775b62SKent Russell dev_err(kfd_device, "Error initializing gtt sub-allocator\n"); 52573a1da0bSOded Gabbay goto kfd_gtt_sa_init_error; 52673a1da0bSOded Gabbay } 52773a1da0bSOded Gabbay 528735df2baSFelix Kuehling if (kfd_doorbell_init(kfd)) { 529735df2baSFelix Kuehling dev_err(kfd_device, 530735df2baSFelix Kuehling "Error initializing doorbell aperture\n"); 531735df2baSFelix Kuehling goto kfd_doorbell_error; 532735df2baSFelix Kuehling } 53319f6d2a6SOded Gabbay 534*c5650327SDivya Shikre if (amdgpu_use_xgmi_p2p) 53502274fc0SGraham Sider kfd->hive_id = kfd->adev->gmc.xgmi.hive_id; 5360c1690e3SShaoyun Liu 53702274fc0SGraham Sider kfd->noretry = kfd->adev->gmc.noretry; 5389b498efaSAlex Deucher 5392249d558SAndrew Lewycky if (kfd_interrupt_init(kfd)) { 54079775b62SKent Russell dev_err(kfd_device, "Error initializing interrupts\n"); 5412249d558SAndrew Lewycky goto kfd_interrupt_error; 5422249d558SAndrew Lewycky } 5432249d558SAndrew Lewycky 54464c7f8cfSBen Goz kfd->dqm = device_queue_manager_init(kfd); 54564c7f8cfSBen Goz if (!kfd->dqm) { 54679775b62SKent Russell dev_err(kfd_device, "Error initializing queue manager\n"); 54764c7f8cfSBen Goz goto device_queue_manager_error; 54864c7f8cfSBen Goz } 54964c7f8cfSBen Goz 55029633d0eSJoseph Greathouse /* If supported on this device, allocate global GWS that is shared 55129633d0eSJoseph Greathouse * by all KFD processes 55229633d0eSJoseph Greathouse */ 55329633d0eSJoseph Greathouse if (kfd_gws_init(kfd)) { 55429633d0eSJoseph Greathouse dev_err(kfd_device, "Could not allocate %d gws\n", 55502274fc0SGraham Sider kfd->adev->gds.gws_size); 55629633d0eSJoseph Greathouse goto gws_error; 55729633d0eSJoseph Greathouse } 55829633d0eSJoseph Greathouse 5596127896fSHuang Rui /* If CRAT is broken, won't set iommu enabled */ 5606127896fSHuang Rui kfd_double_confirm_iommu_support(kfd); 5616127896fSHuang Rui 56264d1c3a4SFelix Kuehling if (kfd_iommu_device_init(kfd)) { 5636f4b590aSYifan Zhang kfd->use_iommu_v2 = false; 56464d1c3a4SFelix Kuehling dev_err(kfd_device, "Error initializing iommuv2\n"); 56564d1c3a4SFelix Kuehling goto device_iommu_error; 56664c7f8cfSBen Goz } 56764c7f8cfSBen Goz 568373d7080SFelix Kuehling kfd_cwsr_init(kfd); 569373d7080SFelix Kuehling 57056c5977eSGraham Sider svm_migrate_init(kfd->adev); 571814ab993SPhilip Yang 572afd18180SYifan Zhang if (kgd2kfd_resume_iommu(kfd)) 573afd18180SYifan Zhang goto device_iommu_error; 574afd18180SYifan Zhang 575b8935a7cSYong Zhao if (kfd_resume(kfd)) 576b8935a7cSYong Zhao goto kfd_resume_error; 577b8935a7cSYong Zhao 578465ab9e0SOak Zeng if (kfd_topology_add_device(kfd)) { 579465ab9e0SOak Zeng dev_err(kfd_device, "Error adding device to topology\n"); 580465ab9e0SOak Zeng goto kfd_topology_add_device_error; 581465ab9e0SOak Zeng } 582465ab9e0SOak Zeng 583938a0650SAmber Lin kfd_smi_init(kfd); 584938a0650SAmber Lin 5854a488a7aSOded Gabbay kfd->init_complete = true; 58679775b62SKent Russell dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor, 5874a488a7aSOded Gabbay kfd->pdev->device); 5884a488a7aSOded Gabbay 58979775b62SKent Russell pr_debug("Starting kfd with the following scheduling policy %d\n", 590d146c5a7SFelix Kuehling kfd->dqm->sched_policy); 59164c7f8cfSBen Goz 59219f6d2a6SOded Gabbay goto out; 59319f6d2a6SOded Gabbay 594465ab9e0SOak Zeng kfd_topology_add_device_error: 595b8935a7cSYong Zhao kfd_resume_error: 59664d1c3a4SFelix Kuehling device_iommu_error: 59729633d0eSJoseph Greathouse gws_error: 59864c7f8cfSBen Goz device_queue_manager_uninit(kfd->dqm); 59964c7f8cfSBen Goz device_queue_manager_error: 6002249d558SAndrew Lewycky kfd_interrupt_exit(kfd); 6012249d558SAndrew Lewycky kfd_interrupt_error: 602735df2baSFelix Kuehling kfd_doorbell_fini(kfd); 603735df2baSFelix Kuehling kfd_doorbell_error: 60473a1da0bSOded Gabbay kfd_gtt_sa_fini(kfd); 60573a1da0bSOded Gabbay kfd_gtt_sa_init_error: 6066bfc7c7eSGraham Sider amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); 607e09d4fc8SOak Zeng alloc_gtt_mem_failure: 60829633d0eSJoseph Greathouse if (kfd->gws) 6096bfc7c7eSGraham Sider amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws); 61019f6d2a6SOded Gabbay dev_err(kfd_device, 61179775b62SKent Russell "device %x:%x NOT added due to errors\n", 61219f6d2a6SOded Gabbay kfd->pdev->vendor, kfd->pdev->device); 61319f6d2a6SOded Gabbay out: 61419f6d2a6SOded Gabbay return kfd->init_complete; 6154a488a7aSOded Gabbay } 6164a488a7aSOded Gabbay 6174a488a7aSOded Gabbay void kgd2kfd_device_exit(struct kfd_dev *kfd) 6184a488a7aSOded Gabbay { 619b17f068aSOded Gabbay if (kfd->init_complete) { 62064c7f8cfSBen Goz device_queue_manager_uninit(kfd->dqm); 6212249d558SAndrew Lewycky kfd_interrupt_exit(kfd); 62219f6d2a6SOded Gabbay kfd_topology_remove_device(kfd); 623735df2baSFelix Kuehling kfd_doorbell_fini(kfd); 62459d7115dSMukul Joshi ida_destroy(&kfd->doorbell_ida); 62573a1da0bSOded Gabbay kfd_gtt_sa_fini(kfd); 6266bfc7c7eSGraham Sider amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); 62729633d0eSJoseph Greathouse if (kfd->gws) 6286bfc7c7eSGraham Sider amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws); 629b17f068aSOded Gabbay } 6305b5c4e40SEvgeny Pinchuk 6314a488a7aSOded Gabbay kfree(kfd); 6324a488a7aSOded Gabbay } 6334a488a7aSOded Gabbay 634e3b7a967SShaoyun Liu int kgd2kfd_pre_reset(struct kfd_dev *kfd) 635e3b7a967SShaoyun Liu { 636e42051d2SShaoyun Liu if (!kfd->init_complete) 637e42051d2SShaoyun Liu return 0; 63809c34e8dSFelix Kuehling 63955977744SMukul Joshi kfd_smi_event_update_gpu_reset(kfd, false); 64055977744SMukul Joshi 64109c34e8dSFelix Kuehling kfd->dqm->ops.pre_reset(kfd->dqm); 64209c34e8dSFelix Kuehling 6439593f4d6SRajneesh Bhardwaj kgd2kfd_suspend(kfd, false); 644e42051d2SShaoyun Liu 645e42051d2SShaoyun Liu kfd_signal_reset_event(kfd); 646e3b7a967SShaoyun Liu return 0; 647e3b7a967SShaoyun Liu } 648e3b7a967SShaoyun Liu 649e42051d2SShaoyun Liu /* 650e42051d2SShaoyun Liu * Fix me. KFD won't be able to resume existing process for now. 651e42051d2SShaoyun Liu * We will keep all existing process in a evicted state and 652e42051d2SShaoyun Liu * wait the process to be terminated. 653e42051d2SShaoyun Liu */ 654e42051d2SShaoyun Liu 655e3b7a967SShaoyun Liu int kgd2kfd_post_reset(struct kfd_dev *kfd) 656e3b7a967SShaoyun Liu { 657a1bd079fSyu kuai int ret; 658e42051d2SShaoyun Liu 659e42051d2SShaoyun Liu if (!kfd->init_complete) 660e3b7a967SShaoyun Liu return 0; 661e42051d2SShaoyun Liu 662e42051d2SShaoyun Liu ret = kfd_resume(kfd); 663e42051d2SShaoyun Liu if (ret) 664e42051d2SShaoyun Liu return ret; 665a1bd079fSyu kuai atomic_dec(&kfd_locked); 6669b54d201SEric Huang 6679b54d201SEric Huang atomic_set(&kfd->sram_ecc_flag, 0); 6689b54d201SEric Huang 66955977744SMukul Joshi kfd_smi_event_update_gpu_reset(kfd, true); 67055977744SMukul Joshi 671e42051d2SShaoyun Liu return 0; 672e42051d2SShaoyun Liu } 673e42051d2SShaoyun Liu 674e42051d2SShaoyun Liu bool kfd_is_locked(void) 675e42051d2SShaoyun Liu { 676e42051d2SShaoyun Liu return (atomic_read(&kfd_locked) > 0); 677e3b7a967SShaoyun Liu } 678e3b7a967SShaoyun Liu 6799593f4d6SRajneesh Bhardwaj void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) 6804a488a7aSOded Gabbay { 681733fa1f7SYong Zhao if (!kfd->init_complete) 682733fa1f7SYong Zhao return; 683733fa1f7SYong Zhao 6849593f4d6SRajneesh Bhardwaj /* for runtime suspend, skip locking kfd */ 6859593f4d6SRajneesh Bhardwaj if (!run_pm) { 68626103436SFelix Kuehling /* For first KFD device suspend all the KFD processes */ 687e42051d2SShaoyun Liu if (atomic_inc_return(&kfd_locked) == 1) 68826103436SFelix Kuehling kfd_suspend_all_processes(); 6899593f4d6SRajneesh Bhardwaj } 69026103436SFelix Kuehling 69145c9a5e4SOded Gabbay kfd->dqm->ops.stop(kfd->dqm); 69264d1c3a4SFelix Kuehling kfd_iommu_suspend(kfd); 6934a488a7aSOded Gabbay } 6944a488a7aSOded Gabbay 6959593f4d6SRajneesh Bhardwaj int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) 6964a488a7aSOded Gabbay { 69726103436SFelix Kuehling int ret, count; 69826103436SFelix Kuehling 699b8935a7cSYong Zhao if (!kfd->init_complete) 700b8935a7cSYong Zhao return 0; 701b17f068aSOded Gabbay 70226103436SFelix Kuehling ret = kfd_resume(kfd); 70326103436SFelix Kuehling if (ret) 70426103436SFelix Kuehling return ret; 705b17f068aSOded Gabbay 7069593f4d6SRajneesh Bhardwaj /* for runtime resume, skip unlocking kfd */ 7079593f4d6SRajneesh Bhardwaj if (!run_pm) { 708e42051d2SShaoyun Liu count = atomic_dec_return(&kfd_locked); 70926103436SFelix Kuehling WARN_ONCE(count < 0, "KFD suspend / resume ref. error"); 71026103436SFelix Kuehling if (count == 0) 71126103436SFelix Kuehling ret = kfd_resume_all_processes(); 7129593f4d6SRajneesh Bhardwaj } 71326103436SFelix Kuehling 71426103436SFelix Kuehling return ret; 7154ebc7182SYong Zhao } 7164ebc7182SYong Zhao 717f8846323SJames Zhu int kgd2kfd_resume_iommu(struct kfd_dev *kfd) 718b8935a7cSYong Zhao { 719b8935a7cSYong Zhao int err = 0; 720b8935a7cSYong Zhao 72164d1c3a4SFelix Kuehling err = kfd_iommu_resume(kfd); 722f8846323SJames Zhu if (err) 72364d1c3a4SFelix Kuehling dev_err(kfd_device, 72464d1c3a4SFelix Kuehling "Failed to resume IOMMU for device %x:%x\n", 72564d1c3a4SFelix Kuehling kfd->pdev->vendor, kfd->pdev->device); 72664d1c3a4SFelix Kuehling return err; 72764d1c3a4SFelix Kuehling } 728733fa1f7SYong Zhao 729f8846323SJames Zhu static int kfd_resume(struct kfd_dev *kfd) 730f8846323SJames Zhu { 731f8846323SJames Zhu int err = 0; 732f8846323SJames Zhu 733b8935a7cSYong Zhao err = kfd->dqm->ops.start(kfd->dqm); 734499f4d38SYifan Zhang if (err) 735b8935a7cSYong Zhao dev_err(kfd_device, 736b8935a7cSYong Zhao "Error starting queue manager for device %x:%x\n", 737b8935a7cSYong Zhao kfd->pdev->vendor, kfd->pdev->device); 738b17f068aSOded Gabbay 739b8935a7cSYong Zhao return err; 7404a488a7aSOded Gabbay } 7414a488a7aSOded Gabbay 742b3eca59dSPhilip Yang static inline void kfd_queue_work(struct workqueue_struct *wq, 743b3eca59dSPhilip Yang struct work_struct *work) 744b3eca59dSPhilip Yang { 745b3eca59dSPhilip Yang int cpu, new_cpu; 746b3eca59dSPhilip Yang 747b3eca59dSPhilip Yang cpu = new_cpu = smp_processor_id(); 748b3eca59dSPhilip Yang do { 749b3eca59dSPhilip Yang new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids; 750b3eca59dSPhilip Yang if (cpu_to_node(new_cpu) == numa_node_id()) 751b3eca59dSPhilip Yang break; 752b3eca59dSPhilip Yang } while (cpu != new_cpu); 753b3eca59dSPhilip Yang 754b3eca59dSPhilip Yang queue_work_on(new_cpu, wq, work); 755b3eca59dSPhilip Yang } 756b3eca59dSPhilip Yang 757b3f5e6b4SAndrew Lewycky /* This is called directly from KGD at ISR. */ 758b3f5e6b4SAndrew Lewycky void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) 7594a488a7aSOded Gabbay { 76058e69886SLan Xiao uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE]; 76158e69886SLan Xiao bool is_patched = false; 7622383a767SChristian König unsigned long flags; 76358e69886SLan Xiao 7642249d558SAndrew Lewycky if (!kfd->init_complete) 7652249d558SAndrew Lewycky return; 7662249d558SAndrew Lewycky 767f0dc99a6SGraham Sider if (kfd->device_info.ih_ring_entry_size > sizeof(patched_ihre)) { 76858e69886SLan Xiao dev_err_once(kfd_device, "Ring entry too small\n"); 76958e69886SLan Xiao return; 77058e69886SLan Xiao } 77158e69886SLan Xiao 7722383a767SChristian König spin_lock_irqsave(&kfd->interrupt_lock, flags); 7732249d558SAndrew Lewycky 7742249d558SAndrew Lewycky if (kfd->interrupts_active 77558e69886SLan Xiao && interrupt_is_wanted(kfd, ih_ring_entry, 77658e69886SLan Xiao patched_ihre, &is_patched) 77758e69886SLan Xiao && enqueue_ih_ring_entry(kfd, 77858e69886SLan Xiao is_patched ? patched_ihre : ih_ring_entry)) 779b3eca59dSPhilip Yang kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work); 7802249d558SAndrew Lewycky 7812383a767SChristian König spin_unlock_irqrestore(&kfd->interrupt_lock, flags); 7824a488a7aSOded Gabbay } 7836e81090bSOded Gabbay 7846b95e797SFelix Kuehling int kgd2kfd_quiesce_mm(struct mm_struct *mm) 7856b95e797SFelix Kuehling { 7866b95e797SFelix Kuehling struct kfd_process *p; 7876b95e797SFelix Kuehling int r; 7886b95e797SFelix Kuehling 7896b95e797SFelix Kuehling /* Because we are called from arbitrary context (workqueue) as opposed 7906b95e797SFelix Kuehling * to process context, kfd_process could attempt to exit while we are 7916b95e797SFelix Kuehling * running so the lookup function increments the process ref count. 7926b95e797SFelix Kuehling */ 7936b95e797SFelix Kuehling p = kfd_lookup_process_by_mm(mm); 7946b95e797SFelix Kuehling if (!p) 7956b95e797SFelix Kuehling return -ESRCH; 7966b95e797SFelix Kuehling 797b2057956SFelix Kuehling WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid); 7986b95e797SFelix Kuehling r = kfd_process_evict_queues(p); 7996b95e797SFelix Kuehling 8006b95e797SFelix Kuehling kfd_unref_process(p); 8016b95e797SFelix Kuehling return r; 8026b95e797SFelix Kuehling } 8036b95e797SFelix Kuehling 8046b95e797SFelix Kuehling int kgd2kfd_resume_mm(struct mm_struct *mm) 8056b95e797SFelix Kuehling { 8066b95e797SFelix Kuehling struct kfd_process *p; 8076b95e797SFelix Kuehling int r; 8086b95e797SFelix Kuehling 8096b95e797SFelix Kuehling /* Because we are called from arbitrary context (workqueue) as opposed 8106b95e797SFelix Kuehling * to process context, kfd_process could attempt to exit while we are 8116b95e797SFelix Kuehling * running so the lookup function increments the process ref count. 8126b95e797SFelix Kuehling */ 8136b95e797SFelix Kuehling p = kfd_lookup_process_by_mm(mm); 8146b95e797SFelix Kuehling if (!p) 8156b95e797SFelix Kuehling return -ESRCH; 8166b95e797SFelix Kuehling 8176b95e797SFelix Kuehling r = kfd_process_restore_queues(p); 8186b95e797SFelix Kuehling 8196b95e797SFelix Kuehling kfd_unref_process(p); 8206b95e797SFelix Kuehling return r; 8216b95e797SFelix Kuehling } 8226b95e797SFelix Kuehling 82326103436SFelix Kuehling /** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will 82426103436SFelix Kuehling * prepare for safe eviction of KFD BOs that belong to the specified 82526103436SFelix Kuehling * process. 82626103436SFelix Kuehling * 82726103436SFelix Kuehling * @mm: mm_struct that identifies the specified KFD process 82826103436SFelix Kuehling * @fence: eviction fence attached to KFD process BOs 82926103436SFelix Kuehling * 83026103436SFelix Kuehling */ 83126103436SFelix Kuehling int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm, 83226103436SFelix Kuehling struct dma_fence *fence) 83326103436SFelix Kuehling { 83426103436SFelix Kuehling struct kfd_process *p; 83526103436SFelix Kuehling unsigned long active_time; 83626103436SFelix Kuehling unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS); 83726103436SFelix Kuehling 83826103436SFelix Kuehling if (!fence) 83926103436SFelix Kuehling return -EINVAL; 84026103436SFelix Kuehling 84126103436SFelix Kuehling if (dma_fence_is_signaled(fence)) 84226103436SFelix Kuehling return 0; 84326103436SFelix Kuehling 84426103436SFelix Kuehling p = kfd_lookup_process_by_mm(mm); 84526103436SFelix Kuehling if (!p) 84626103436SFelix Kuehling return -ENODEV; 84726103436SFelix Kuehling 84826103436SFelix Kuehling if (fence->seqno == p->last_eviction_seqno) 84926103436SFelix Kuehling goto out; 85026103436SFelix Kuehling 85126103436SFelix Kuehling p->last_eviction_seqno = fence->seqno; 85226103436SFelix Kuehling 85326103436SFelix Kuehling /* Avoid KFD process starvation. Wait for at least 85426103436SFelix Kuehling * PROCESS_ACTIVE_TIME_MS before evicting the process again 85526103436SFelix Kuehling */ 85626103436SFelix Kuehling active_time = get_jiffies_64() - p->last_restore_timestamp; 85726103436SFelix Kuehling if (delay_jiffies > active_time) 85826103436SFelix Kuehling delay_jiffies -= active_time; 85926103436SFelix Kuehling else 86026103436SFelix Kuehling delay_jiffies = 0; 86126103436SFelix Kuehling 86226103436SFelix Kuehling /* During process initialization eviction_work.dwork is initialized 86326103436SFelix Kuehling * to kfd_evict_bo_worker 86426103436SFelix Kuehling */ 865b2057956SFelix Kuehling WARN(debug_evictions, "Scheduling eviction of pid %d in %ld jiffies", 866b2057956SFelix Kuehling p->lead_thread->pid, delay_jiffies); 86726103436SFelix Kuehling schedule_delayed_work(&p->eviction_work, delay_jiffies); 86826103436SFelix Kuehling out: 86926103436SFelix Kuehling kfd_unref_process(p); 87026103436SFelix Kuehling return 0; 87126103436SFelix Kuehling } 87226103436SFelix Kuehling 8736e81090bSOded Gabbay static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, 8746e81090bSOded Gabbay unsigned int chunk_size) 8756e81090bSOded Gabbay { 8768625ff9cSFelix Kuehling unsigned int num_of_longs; 8776e81090bSOded Gabbay 87832fa8219SFelix Kuehling if (WARN_ON(buf_size < chunk_size)) 87932fa8219SFelix Kuehling return -EINVAL; 88032fa8219SFelix Kuehling if (WARN_ON(buf_size == 0)) 88132fa8219SFelix Kuehling return -EINVAL; 88232fa8219SFelix Kuehling if (WARN_ON(chunk_size == 0)) 88332fa8219SFelix Kuehling return -EINVAL; 8846e81090bSOded Gabbay 8856e81090bSOded Gabbay kfd->gtt_sa_chunk_size = chunk_size; 8866e81090bSOded Gabbay kfd->gtt_sa_num_of_chunks = buf_size / chunk_size; 8876e81090bSOded Gabbay 8888625ff9cSFelix Kuehling num_of_longs = (kfd->gtt_sa_num_of_chunks + BITS_PER_LONG - 1) / 8898625ff9cSFelix Kuehling BITS_PER_LONG; 8906e81090bSOded Gabbay 8918625ff9cSFelix Kuehling kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL); 8926e81090bSOded Gabbay 8936e81090bSOded Gabbay if (!kfd->gtt_sa_bitmap) 8946e81090bSOded Gabbay return -ENOMEM; 8956e81090bSOded Gabbay 89679775b62SKent Russell pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n", 8976e81090bSOded Gabbay kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap); 8986e81090bSOded Gabbay 8996e81090bSOded Gabbay mutex_init(&kfd->gtt_sa_lock); 9006e81090bSOded Gabbay 9016e81090bSOded Gabbay return 0; 9026e81090bSOded Gabbay 9036e81090bSOded Gabbay } 9046e81090bSOded Gabbay 9056e81090bSOded Gabbay static void kfd_gtt_sa_fini(struct kfd_dev *kfd) 9066e81090bSOded Gabbay { 9076e81090bSOded Gabbay mutex_destroy(&kfd->gtt_sa_lock); 9086e81090bSOded Gabbay kfree(kfd->gtt_sa_bitmap); 9096e81090bSOded Gabbay } 9106e81090bSOded Gabbay 9116e81090bSOded Gabbay static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr, 9126e81090bSOded Gabbay unsigned int bit_num, 9136e81090bSOded Gabbay unsigned int chunk_size) 9146e81090bSOded Gabbay { 9156e81090bSOded Gabbay return start_addr + bit_num * chunk_size; 9166e81090bSOded Gabbay } 9176e81090bSOded Gabbay 9186e81090bSOded Gabbay static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr, 9196e81090bSOded Gabbay unsigned int bit_num, 9206e81090bSOded Gabbay unsigned int chunk_size) 9216e81090bSOded Gabbay { 9226e81090bSOded Gabbay return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size); 9236e81090bSOded Gabbay } 9246e81090bSOded Gabbay 9256e81090bSOded Gabbay int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size, 9266e81090bSOded Gabbay struct kfd_mem_obj **mem_obj) 9276e81090bSOded Gabbay { 9286e81090bSOded Gabbay unsigned int found, start_search, cur_size; 9296e81090bSOded Gabbay 9306e81090bSOded Gabbay if (size == 0) 9316e81090bSOded Gabbay return -EINVAL; 9326e81090bSOded Gabbay 9336e81090bSOded Gabbay if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size) 9346e81090bSOded Gabbay return -ENOMEM; 9356e81090bSOded Gabbay 9361cd106ecSFelix Kuehling *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL); 9371cd106ecSFelix Kuehling if (!(*mem_obj)) 9386e81090bSOded Gabbay return -ENOMEM; 9396e81090bSOded Gabbay 94079775b62SKent Russell pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size); 9416e81090bSOded Gabbay 9426e81090bSOded Gabbay start_search = 0; 9436e81090bSOded Gabbay 9446e81090bSOded Gabbay mutex_lock(&kfd->gtt_sa_lock); 9456e81090bSOded Gabbay 9466e81090bSOded Gabbay kfd_gtt_restart_search: 9476e81090bSOded Gabbay /* Find the first chunk that is free */ 9486e81090bSOded Gabbay found = find_next_zero_bit(kfd->gtt_sa_bitmap, 9496e81090bSOded Gabbay kfd->gtt_sa_num_of_chunks, 9506e81090bSOded Gabbay start_search); 9516e81090bSOded Gabbay 95279775b62SKent Russell pr_debug("Found = %d\n", found); 9536e81090bSOded Gabbay 9546e81090bSOded Gabbay /* If there wasn't any free chunk, bail out */ 9556e81090bSOded Gabbay if (found == kfd->gtt_sa_num_of_chunks) 9566e81090bSOded Gabbay goto kfd_gtt_no_free_chunk; 9576e81090bSOded Gabbay 9586e81090bSOded Gabbay /* Update fields of mem_obj */ 9596e81090bSOded Gabbay (*mem_obj)->range_start = found; 9606e81090bSOded Gabbay (*mem_obj)->range_end = found; 9616e81090bSOded Gabbay (*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr( 9626e81090bSOded Gabbay kfd->gtt_start_gpu_addr, 9636e81090bSOded Gabbay found, 9646e81090bSOded Gabbay kfd->gtt_sa_chunk_size); 9656e81090bSOded Gabbay (*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr( 9666e81090bSOded Gabbay kfd->gtt_start_cpu_ptr, 9676e81090bSOded Gabbay found, 9686e81090bSOded Gabbay kfd->gtt_sa_chunk_size); 9696e81090bSOded Gabbay 97079775b62SKent Russell pr_debug("gpu_addr = %p, cpu_addr = %p\n", 9716e81090bSOded Gabbay (uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr); 9726e81090bSOded Gabbay 9736e81090bSOded Gabbay /* If we need only one chunk, mark it as allocated and get out */ 9746e81090bSOded Gabbay if (size <= kfd->gtt_sa_chunk_size) { 97579775b62SKent Russell pr_debug("Single bit\n"); 9766e81090bSOded Gabbay set_bit(found, kfd->gtt_sa_bitmap); 9776e81090bSOded Gabbay goto kfd_gtt_out; 9786e81090bSOded Gabbay } 9796e81090bSOded Gabbay 9806e81090bSOded Gabbay /* Otherwise, try to see if we have enough contiguous chunks */ 9816e81090bSOded Gabbay cur_size = size - kfd->gtt_sa_chunk_size; 9826e81090bSOded Gabbay do { 9836e81090bSOded Gabbay (*mem_obj)->range_end = 9846e81090bSOded Gabbay find_next_zero_bit(kfd->gtt_sa_bitmap, 9856e81090bSOded Gabbay kfd->gtt_sa_num_of_chunks, ++found); 9866e81090bSOded Gabbay /* 9876e81090bSOded Gabbay * If next free chunk is not contiguous than we need to 9886e81090bSOded Gabbay * restart our search from the last free chunk we found (which 9896e81090bSOded Gabbay * wasn't contiguous to the previous ones 9906e81090bSOded Gabbay */ 9916e81090bSOded Gabbay if ((*mem_obj)->range_end != found) { 9926e81090bSOded Gabbay start_search = found; 9936e81090bSOded Gabbay goto kfd_gtt_restart_search; 9946e81090bSOded Gabbay } 9956e81090bSOded Gabbay 9966e81090bSOded Gabbay /* 9976e81090bSOded Gabbay * If we reached end of buffer, bail out with error 9986e81090bSOded Gabbay */ 9996e81090bSOded Gabbay if (found == kfd->gtt_sa_num_of_chunks) 10006e81090bSOded Gabbay goto kfd_gtt_no_free_chunk; 10016e81090bSOded Gabbay 10026e81090bSOded Gabbay /* Check if we don't need another chunk */ 10036e81090bSOded Gabbay if (cur_size <= kfd->gtt_sa_chunk_size) 10046e81090bSOded Gabbay cur_size = 0; 10056e81090bSOded Gabbay else 10066e81090bSOded Gabbay cur_size -= kfd->gtt_sa_chunk_size; 10076e81090bSOded Gabbay 10086e81090bSOded Gabbay } while (cur_size > 0); 10096e81090bSOded Gabbay 101079775b62SKent Russell pr_debug("range_start = %d, range_end = %d\n", 10116e81090bSOded Gabbay (*mem_obj)->range_start, (*mem_obj)->range_end); 10126e81090bSOded Gabbay 10136e81090bSOded Gabbay /* Mark the chunks as allocated */ 10146e81090bSOded Gabbay for (found = (*mem_obj)->range_start; 10156e81090bSOded Gabbay found <= (*mem_obj)->range_end; 10166e81090bSOded Gabbay found++) 10176e81090bSOded Gabbay set_bit(found, kfd->gtt_sa_bitmap); 10186e81090bSOded Gabbay 10196e81090bSOded Gabbay kfd_gtt_out: 10206e81090bSOded Gabbay mutex_unlock(&kfd->gtt_sa_lock); 10216e81090bSOded Gabbay return 0; 10226e81090bSOded Gabbay 10236e81090bSOded Gabbay kfd_gtt_no_free_chunk: 10243148a6a0SJack Zhang pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj); 10256e81090bSOded Gabbay mutex_unlock(&kfd->gtt_sa_lock); 10263148a6a0SJack Zhang kfree(*mem_obj); 10276e81090bSOded Gabbay return -ENOMEM; 10286e81090bSOded Gabbay } 10296e81090bSOded Gabbay 10306e81090bSOded Gabbay int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj) 10316e81090bSOded Gabbay { 10326e81090bSOded Gabbay unsigned int bit; 10336e81090bSOded Gabbay 10349216ed29SOded Gabbay /* Act like kfree when trying to free a NULL object */ 10359216ed29SOded Gabbay if (!mem_obj) 10369216ed29SOded Gabbay return 0; 10376e81090bSOded Gabbay 103879775b62SKent Russell pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n", 10396e81090bSOded Gabbay mem_obj, mem_obj->range_start, mem_obj->range_end); 10406e81090bSOded Gabbay 10416e81090bSOded Gabbay mutex_lock(&kfd->gtt_sa_lock); 10426e81090bSOded Gabbay 10436e81090bSOded Gabbay /* Mark the chunks as free */ 10446e81090bSOded Gabbay for (bit = mem_obj->range_start; 10456e81090bSOded Gabbay bit <= mem_obj->range_end; 10466e81090bSOded Gabbay bit++) 10476e81090bSOded Gabbay clear_bit(bit, kfd->gtt_sa_bitmap); 10486e81090bSOded Gabbay 10496e81090bSOded Gabbay mutex_unlock(&kfd->gtt_sa_lock); 10506e81090bSOded Gabbay 10516e81090bSOded Gabbay kfree(mem_obj); 10526e81090bSOded Gabbay return 0; 10536e81090bSOded Gabbay } 1054a29ec470SShaoyun Liu 10559b54d201SEric Huang void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd) 10569b54d201SEric Huang { 10579b54d201SEric Huang if (kfd) 10589b54d201SEric Huang atomic_inc(&kfd->sram_ecc_flag); 10599b54d201SEric Huang } 10609b54d201SEric Huang 106143d8107fSHarish Kasiviswanathan void kfd_inc_compute_active(struct kfd_dev *kfd) 106243d8107fSHarish Kasiviswanathan { 106343d8107fSHarish Kasiviswanathan if (atomic_inc_return(&kfd->compute_profile) == 1) 10646bfc7c7eSGraham Sider amdgpu_amdkfd_set_compute_idle(kfd->adev, false); 106543d8107fSHarish Kasiviswanathan } 106643d8107fSHarish Kasiviswanathan 106743d8107fSHarish Kasiviswanathan void kfd_dec_compute_active(struct kfd_dev *kfd) 106843d8107fSHarish Kasiviswanathan { 106943d8107fSHarish Kasiviswanathan int count = atomic_dec_return(&kfd->compute_profile); 107043d8107fSHarish Kasiviswanathan 107143d8107fSHarish Kasiviswanathan if (count == 0) 10726bfc7c7eSGraham Sider amdgpu_amdkfd_set_compute_idle(kfd->adev, true); 107343d8107fSHarish Kasiviswanathan WARN_ONCE(count < 0, "Compute profile ref. count error"); 107443d8107fSHarish Kasiviswanathan } 107543d8107fSHarish Kasiviswanathan 1076410e302eSGraham Sider void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) 10772c2b0d88SMukul Joshi { 1078158fc08dSAmber Lin if (kfd && kfd->init_complete) 10792c2b0d88SMukul Joshi kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask); 10802c2b0d88SMukul Joshi } 10812c2b0d88SMukul Joshi 1082ee2f17f4SAmber Lin /* kfd_get_num_sdma_engines returns the number of PCIe optimized SDMA and 1083ee2f17f4SAmber Lin * kfd_get_num_xgmi_sdma_engines returns the number of XGMI SDMA. 1084ee2f17f4SAmber Lin * When the device has more than two engines, we reserve two for PCIe to enable 1085ee2f17f4SAmber Lin * full-duplex and the rest are used as XGMI. 1086ee2f17f4SAmber Lin */ 1087ee2f17f4SAmber Lin unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev) 1088ee2f17f4SAmber Lin { 1089ee2f17f4SAmber Lin /* If XGMI is not supported, all SDMA engines are PCIe */ 1090ee2f17f4SAmber Lin if (!kdev->adev->gmc.xgmi.supported) 1091ee2f17f4SAmber Lin return kdev->adev->sdma.num_instances; 1092ee2f17f4SAmber Lin 1093ee2f17f4SAmber Lin return min(kdev->adev->sdma.num_instances, 2); 1094ee2f17f4SAmber Lin } 1095ee2f17f4SAmber Lin 1096ee2f17f4SAmber Lin unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev) 1097ee2f17f4SAmber Lin { 1098ee2f17f4SAmber Lin /* After reserved for PCIe, the rest of engines are XGMI */ 1099ee2f17f4SAmber Lin return kdev->adev->sdma.num_instances - kfd_get_num_sdma_engines(kdev); 1100ee2f17f4SAmber Lin } 1101ee2f17f4SAmber Lin 1102a29ec470SShaoyun Liu #if defined(CONFIG_DEBUG_FS) 1103a29ec470SShaoyun Liu 1104a29ec470SShaoyun Liu /* This function will send a package to HIQ to hang the HWS 1105a29ec470SShaoyun Liu * which will trigger a GPU reset and bring the HWS back to normal state 1106a29ec470SShaoyun Liu */ 1107a29ec470SShaoyun Liu int kfd_debugfs_hang_hws(struct kfd_dev *dev) 1108a29ec470SShaoyun Liu { 1109a29ec470SShaoyun Liu if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) { 1110a29ec470SShaoyun Liu pr_err("HWS is not enabled"); 1111a29ec470SShaoyun Liu return -EINVAL; 1112a29ec470SShaoyun Liu } 1113a29ec470SShaoyun Liu 11144f942aaeSOak Zeng return dqm_debugfs_hang_hws(dev->dqm); 1115a29ec470SShaoyun Liu } 1116a29ec470SShaoyun Liu 1117a29ec470SShaoyun Liu #endif 1118