1d87f36a0SRajneesh Bhardwaj // SPDX-License-Identifier: GPL-2.0 OR MIT 24a488a7aSOded Gabbay /* 3d87f36a0SRajneesh Bhardwaj * Copyright 2014-2022 Advanced Micro Devices, Inc. 44a488a7aSOded Gabbay * 54a488a7aSOded Gabbay * Permission is hereby granted, free of charge, to any person obtaining a 64a488a7aSOded Gabbay * copy of this software and associated documentation files (the "Software"), 74a488a7aSOded Gabbay * to deal in the Software without restriction, including without limitation 84a488a7aSOded Gabbay * the rights to use, copy, modify, merge, publish, distribute, sublicense, 94a488a7aSOded Gabbay * and/or sell copies of the Software, and to permit persons to whom the 104a488a7aSOded Gabbay * Software is furnished to do so, subject to the following conditions: 114a488a7aSOded Gabbay * 124a488a7aSOded Gabbay * The above copyright notice and this permission notice shall be included in 134a488a7aSOded Gabbay * all copies or substantial portions of the Software. 144a488a7aSOded Gabbay * 154a488a7aSOded Gabbay * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 164a488a7aSOded Gabbay * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 174a488a7aSOded Gabbay * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 184a488a7aSOded Gabbay * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 194a488a7aSOded Gabbay * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 204a488a7aSOded Gabbay * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 214a488a7aSOded Gabbay * OTHER DEALINGS IN THE SOFTWARE. 224a488a7aSOded Gabbay */ 234a488a7aSOded Gabbay 244a488a7aSOded Gabbay #include <linux/bsearch.h> 254a488a7aSOded Gabbay #include <linux/pci.h> 264a488a7aSOded Gabbay #include <linux/slab.h> 274a488a7aSOded Gabbay #include "kfd_priv.h" 2864c7f8cfSBen Goz #include "kfd_device_queue_manager.h" 29507968ddSFelix Kuehling #include "kfd_pm4_headers_vi.h" 30fd6a440eSJonathan Kim #include "kfd_pm4_headers_aldebaran.h" 310db54b24SYong Zhao #include "cwsr_trap_handler.h" 3264d1c3a4SFelix Kuehling #include "kfd_iommu.h" 335b87245fSAmber Lin #include "amdgpu_amdkfd.h" 342c2b0d88SMukul Joshi #include "kfd_smi_events.h" 35814ab993SPhilip Yang #include "kfd_migrate.h" 365b983db8SAlex Deucher #include "amdgpu.h" 374a488a7aSOded Gabbay 3819f6d2a6SOded Gabbay #define MQD_SIZE_ALIGNED 768 39e42051d2SShaoyun Liu 40e42051d2SShaoyun Liu /* 41e42051d2SShaoyun Liu * kfd_locked is used to lock the kfd driver during suspend or reset 42e42051d2SShaoyun Liu * once locked, kfd driver will stop any further GPU execution. 43e42051d2SShaoyun Liu * create process (open) will return -EAGAIN. 44e42051d2SShaoyun Liu */ 45e42051d2SShaoyun Liu static atomic_t kfd_locked = ATOMIC_INIT(0); 4619f6d2a6SOded Gabbay 47a3e520a2SAlex Deucher #ifdef CONFIG_DRM_AMDGPU_CIK 48e392c887SYong Zhao extern const struct kfd2kgd_calls gfx_v7_kfd2kgd; 49a3e520a2SAlex Deucher #endif 50e392c887SYong Zhao extern const struct kfd2kgd_calls gfx_v8_kfd2kgd; 51e392c887SYong Zhao extern const struct kfd2kgd_calls gfx_v9_kfd2kgd; 52e392c887SYong Zhao extern const struct kfd2kgd_calls arcturus_kfd2kgd; 535073506cSJonathan Kim extern const struct kfd2kgd_calls aldebaran_kfd2kgd; 54e392c887SYong Zhao extern const struct kfd2kgd_calls gfx_v10_kfd2kgd; 553a2f0c81SYong Zhao extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd; 56cc009e61SMukul Joshi extern const struct kfd2kgd_calls gfx_v11_kfd2kgd; 57e392c887SYong Zhao 586e81090bSOded Gabbay static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, 596e81090bSOded Gabbay unsigned int chunk_size); 606e81090bSOded Gabbay static void kfd_gtt_sa_fini(struct kfd_dev *kfd); 616e81090bSOded Gabbay 62b8935a7cSYong Zhao static int kfd_resume(struct kfd_dev *kfd); 63b8935a7cSYong Zhao 64cc009e61SMukul Joshi static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd) 65f89c6bf7SGuchun Chen { 66f89c6bf7SGuchun Chen uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0]; 67f89c6bf7SGuchun Chen 68f89c6bf7SGuchun Chen switch (sdma_version) { 69f89c6bf7SGuchun Chen case IP_VERSION(4, 0, 0):/* VEGA10 */ 70f89c6bf7SGuchun Chen case IP_VERSION(4, 0, 1):/* VEGA12 */ 71f89c6bf7SGuchun Chen case IP_VERSION(4, 1, 0):/* RAVEN */ 72f89c6bf7SGuchun Chen case IP_VERSION(4, 1, 1):/* RAVEN */ 735eb877b2SKent Russell case IP_VERSION(4, 1, 2):/* RENOIR */ 74f89c6bf7SGuchun Chen case IP_VERSION(5, 2, 1):/* VANGOGH */ 75f89c6bf7SGuchun Chen case IP_VERSION(5, 2, 3):/* YELLOW_CARP */ 767c4f4f19SMario Limonciello case IP_VERSION(5, 2, 6):/* GC 10.3.6 */ 777c4f4f19SMario Limonciello case IP_VERSION(5, 2, 7):/* GC 10.3.7 */ 78f89c6bf7SGuchun Chen kfd->device_info.num_sdma_queues_per_engine = 2; 79f89c6bf7SGuchun Chen break; 80f89c6bf7SGuchun Chen case IP_VERSION(4, 2, 0):/* VEGA20 */ 815eb877b2SKent Russell case IP_VERSION(4, 2, 2):/* ARCTURUS */ 82f89c6bf7SGuchun Chen case IP_VERSION(4, 4, 0):/* ALDEBARAN */ 83f89c6bf7SGuchun Chen case IP_VERSION(5, 0, 0):/* NAVI10 */ 84f89c6bf7SGuchun Chen case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */ 85f89c6bf7SGuchun Chen case IP_VERSION(5, 0, 2):/* NAVI14 */ 86f89c6bf7SGuchun Chen case IP_VERSION(5, 0, 5):/* NAVI12 */ 87f89c6bf7SGuchun Chen case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */ 885eb877b2SKent Russell case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */ 89f89c6bf7SGuchun Chen case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */ 90f89c6bf7SGuchun Chen case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */ 91cc009e61SMukul Joshi case IP_VERSION(6, 0, 0): 92efb4fd10SYifan Zhang case IP_VERSION(6, 0, 1): 9322dd871eSEric Huang case IP_VERSION(6, 0, 2): 945ddb5fe9SDavid Belanger case IP_VERSION(6, 0, 3): 95f89c6bf7SGuchun Chen kfd->device_info.num_sdma_queues_per_engine = 8; 96f89c6bf7SGuchun Chen break; 97f89c6bf7SGuchun Chen default: 98f89c6bf7SGuchun Chen dev_warn(kfd_device, 9920c5e425SGraham Sider "Default sdma queue per engine(8) is set due to mismatch of sdma ip block(SDMA_HWIP:0x%x).\n", 100f89c6bf7SGuchun Chen sdma_version); 101f89c6bf7SGuchun Chen kfd->device_info.num_sdma_queues_per_engine = 8; 102f89c6bf7SGuchun Chen } 103cc009e61SMukul Joshi 104cc009e61SMukul Joshi switch (sdma_version) { 105cc009e61SMukul Joshi case IP_VERSION(6, 0, 0): 10622dd871eSEric Huang case IP_VERSION(6, 0, 2): 1075ddb5fe9SDavid Belanger case IP_VERSION(6, 0, 3): 108cc009e61SMukul Joshi /* Reserve 1 for paging and 1 for gfx */ 109cc009e61SMukul Joshi kfd->device_info.num_reserved_sdma_queues_per_engine = 2; 110cc009e61SMukul Joshi /* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */ 111cc009e61SMukul Joshi kfd->device_info.reserved_sdma_queues_bitmap = 0xFULL; 112cc009e61SMukul Joshi break; 113e48e6a13SYifan Zhang case IP_VERSION(6, 0, 1): 114e48e6a13SYifan Zhang /* Reserve 1 for paging and 1 for gfx */ 115e48e6a13SYifan Zhang kfd->device_info.num_reserved_sdma_queues_per_engine = 2; 116e48e6a13SYifan Zhang /* BIT(0)=engine-0 queue-0; BIT(1)=engine-0 queue-1; ... */ 117e48e6a13SYifan Zhang kfd->device_info.reserved_sdma_queues_bitmap = 0x3ULL; 118e48e6a13SYifan Zhang break; 119cc009e61SMukul Joshi default: 120cc009e61SMukul Joshi break; 121cc009e61SMukul Joshi } 122f89c6bf7SGuchun Chen } 123f89c6bf7SGuchun Chen 124f89c6bf7SGuchun Chen static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd) 125f89c6bf7SGuchun Chen { 126f89c6bf7SGuchun Chen uint32_t gc_version = KFD_GC_VERSION(kfd); 127f89c6bf7SGuchun Chen 128f89c6bf7SGuchun Chen switch (gc_version) { 129f89c6bf7SGuchun Chen case IP_VERSION(9, 0, 1): /* VEGA10 */ 130f89c6bf7SGuchun Chen case IP_VERSION(9, 1, 0): /* RAVEN */ 131f89c6bf7SGuchun Chen case IP_VERSION(9, 2, 1): /* VEGA12 */ 132f89c6bf7SGuchun Chen case IP_VERSION(9, 2, 2): /* RAVEN */ 133f89c6bf7SGuchun Chen case IP_VERSION(9, 3, 0): /* RENOIR */ 134f89c6bf7SGuchun Chen case IP_VERSION(9, 4, 0): /* VEGA20 */ 135f89c6bf7SGuchun Chen case IP_VERSION(9, 4, 1): /* ARCTURUS */ 136f89c6bf7SGuchun Chen case IP_VERSION(9, 4, 2): /* ALDEBARAN */ 137f89c6bf7SGuchun Chen case IP_VERSION(10, 3, 1): /* VANGOGH */ 138f89c6bf7SGuchun Chen case IP_VERSION(10, 3, 3): /* YELLOW_CARP */ 1397c4f4f19SMario Limonciello case IP_VERSION(10, 3, 6): /* GC 10.3.6 */ 1407c4f4f19SMario Limonciello case IP_VERSION(10, 3, 7): /* GC 10.3.7 */ 141f89c6bf7SGuchun Chen case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */ 142f9ed188dSLang Yu case IP_VERSION(10, 1, 4): 143f89c6bf7SGuchun Chen case IP_VERSION(10, 1, 10): /* NAVI10 */ 144f89c6bf7SGuchun Chen case IP_VERSION(10, 1, 2): /* NAVI12 */ 145f89c6bf7SGuchun Chen case IP_VERSION(10, 1, 1): /* NAVI14 */ 146f89c6bf7SGuchun Chen case IP_VERSION(10, 3, 0): /* SIENNA_CICHLID */ 147f89c6bf7SGuchun Chen case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */ 148f89c6bf7SGuchun Chen case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */ 149f89c6bf7SGuchun Chen case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */ 150f89c6bf7SGuchun Chen kfd->device_info.event_interrupt_class = &event_interrupt_class_v9; 151f89c6bf7SGuchun Chen break; 152cc009e61SMukul Joshi case IP_VERSION(11, 0, 0): 15326776a70SHuang Rui case IP_VERSION(11, 0, 1): 154ec661f1cSEric Huang case IP_VERSION(11, 0, 2): 1555ddb5fe9SDavid Belanger case IP_VERSION(11, 0, 3): 156*88c21c2bSYifan Zhang case IP_VERSION(11, 0, 4): 157cc009e61SMukul Joshi kfd->device_info.event_interrupt_class = &event_interrupt_class_v11; 158cc009e61SMukul Joshi break; 159f89c6bf7SGuchun Chen default: 160f89c6bf7SGuchun Chen dev_warn(kfd_device, "v9 event interrupt handler is set due to " 161f89c6bf7SGuchun Chen "mismatch of gc ip block(GC_HWIP:0x%x).\n", gc_version); 162f89c6bf7SGuchun Chen kfd->device_info.event_interrupt_class = &event_interrupt_class_v9; 163f89c6bf7SGuchun Chen } 164f89c6bf7SGuchun Chen } 165f89c6bf7SGuchun Chen 166f0dc99a6SGraham Sider static void kfd_device_info_init(struct kfd_dev *kfd, 167f0dc99a6SGraham Sider bool vf, uint32_t gfx_target_version) 168f0dc99a6SGraham Sider { 169f0dc99a6SGraham Sider uint32_t gc_version = KFD_GC_VERSION(kfd); 170f0dc99a6SGraham Sider uint32_t asic_type = kfd->adev->asic_type; 171f0dc99a6SGraham Sider 172f0dc99a6SGraham Sider kfd->device_info.max_pasid_bits = 16; 173f0dc99a6SGraham Sider kfd->device_info.max_no_of_hqd = 24; 174f0dc99a6SGraham Sider kfd->device_info.num_of_watch_points = 4; 175f0dc99a6SGraham Sider kfd->device_info.mqd_size_aligned = MQD_SIZE_ALIGNED; 176f0dc99a6SGraham Sider kfd->device_info.gfx_target_version = gfx_target_version; 177f0dc99a6SGraham Sider 178f0dc99a6SGraham Sider if (KFD_IS_SOC15(kfd)) { 179f0dc99a6SGraham Sider kfd->device_info.doorbell_size = 8; 180f0dc99a6SGraham Sider kfd->device_info.ih_ring_entry_size = 8 * sizeof(uint32_t); 181f0dc99a6SGraham Sider kfd->device_info.supports_cwsr = true; 182f0dc99a6SGraham Sider 183cc009e61SMukul Joshi kfd_device_info_set_sdma_info(kfd); 184f89c6bf7SGuchun Chen 185f89c6bf7SGuchun Chen kfd_device_info_set_event_interrupt_class(kfd); 186f0dc99a6SGraham Sider 187f0dc99a6SGraham Sider /* Raven */ 188f0dc99a6SGraham Sider if (gc_version == IP_VERSION(9, 1, 0) || 189f0dc99a6SGraham Sider gc_version == IP_VERSION(9, 2, 2)) 190f0dc99a6SGraham Sider kfd->device_info.needs_iommu_device = true; 191f0dc99a6SGraham Sider 192f0dc99a6SGraham Sider if (gc_version < IP_VERSION(11, 0, 0)) { 193f0dc99a6SGraham Sider /* Navi2x+, Navi1x+ */ 19468e355c0SJesse Zhang if (gc_version == IP_VERSION(10, 3, 6)) 19568e355c0SJesse Zhang kfd->device_info.no_atomic_fw_version = 14; 196c4e85551SPrike Liang else if (gc_version == IP_VERSION(10, 3, 7)) 197c4e85551SPrike Liang kfd->device_info.no_atomic_fw_version = 3; 19868e355c0SJesse Zhang else if (gc_version >= IP_VERSION(10, 3, 0)) 199f0dc99a6SGraham Sider kfd->device_info.no_atomic_fw_version = 92; 20027cc310fSchen gong else if (gc_version >= IP_VERSION(10, 1, 1)) 20127cc310fSchen gong kfd->device_info.no_atomic_fw_version = 145; 202f0dc99a6SGraham Sider 203f0dc99a6SGraham Sider /* Navi1x+ */ 204f0dc99a6SGraham Sider if (gc_version >= IP_VERSION(10, 1, 1)) 205f0dc99a6SGraham Sider kfd->device_info.needs_pci_atomics = true; 206f0dc99a6SGraham Sider } 207f0dc99a6SGraham Sider } else { 208f0dc99a6SGraham Sider kfd->device_info.doorbell_size = 4; 209f0dc99a6SGraham Sider kfd->device_info.ih_ring_entry_size = 4 * sizeof(uint32_t); 210f0dc99a6SGraham Sider kfd->device_info.event_interrupt_class = &event_interrupt_class_cik; 211f0dc99a6SGraham Sider kfd->device_info.num_sdma_queues_per_engine = 2; 212f0dc99a6SGraham Sider 213f0dc99a6SGraham Sider if (asic_type != CHIP_KAVERI && 214f0dc99a6SGraham Sider asic_type != CHIP_HAWAII && 215f0dc99a6SGraham Sider asic_type != CHIP_TONGA) 216f0dc99a6SGraham Sider kfd->device_info.supports_cwsr = true; 217f0dc99a6SGraham Sider 218f0dc99a6SGraham Sider if (asic_type == CHIP_KAVERI || 219f0dc99a6SGraham Sider asic_type == CHIP_CARRIZO) 220f0dc99a6SGraham Sider kfd->device_info.needs_iommu_device = true; 221f0dc99a6SGraham Sider 222f0dc99a6SGraham Sider if (asic_type != CHIP_HAWAII && !vf) 223f0dc99a6SGraham Sider kfd->device_info.needs_pci_atomics = true; 224f0dc99a6SGraham Sider } 225f0dc99a6SGraham Sider } 226f0dc99a6SGraham Sider 227b5d1d755SGraham Sider struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) 2284a488a7aSOded Gabbay { 229f0dc99a6SGraham Sider struct kfd_dev *kfd = NULL; 230f0dc99a6SGraham Sider const struct kfd2kgd_calls *f2g = NULL; 231f0dc99a6SGraham Sider uint32_t gfx_target_version = 0; 232050091abSYong Zhao 233c868d584SAlex Deucher switch (adev->asic_type) { 234c868d584SAlex Deucher #ifdef KFD_SUPPORT_IOMMU_V2 235c868d584SAlex Deucher #ifdef CONFIG_DRM_AMDGPU_CIK 236c868d584SAlex Deucher case CHIP_KAVERI: 237f0dc99a6SGraham Sider gfx_target_version = 70000; 238f0dc99a6SGraham Sider if (!vf) 239c868d584SAlex Deucher f2g = &gfx_v7_kfd2kgd; 240c868d584SAlex Deucher break; 241c868d584SAlex Deucher #endif 242c868d584SAlex Deucher case CHIP_CARRIZO: 243f0dc99a6SGraham Sider gfx_target_version = 80001; 244f0dc99a6SGraham Sider if (!vf) 245c868d584SAlex Deucher f2g = &gfx_v8_kfd2kgd; 246c868d584SAlex Deucher break; 247c868d584SAlex Deucher #endif 248c868d584SAlex Deucher #ifdef CONFIG_DRM_AMDGPU_CIK 249c868d584SAlex Deucher case CHIP_HAWAII: 250f0dc99a6SGraham Sider gfx_target_version = 70001; 2510f7ef0b9SFelix Kuehling if (!amdgpu_exp_hw_support) 2520f7ef0b9SFelix Kuehling pr_info( 2530f7ef0b9SFelix Kuehling "KFD support on Hawaii is experimental. See modparam exp_hw_support\n" 2540f7ef0b9SFelix Kuehling ); 2550f7ef0b9SFelix Kuehling else if (!vf) 256c868d584SAlex Deucher f2g = &gfx_v7_kfd2kgd; 257c868d584SAlex Deucher break; 258c868d584SAlex Deucher #endif 259c868d584SAlex Deucher case CHIP_TONGA: 260f0dc99a6SGraham Sider gfx_target_version = 80002; 261f0dc99a6SGraham Sider if (!vf) 262c868d584SAlex Deucher f2g = &gfx_v8_kfd2kgd; 263c868d584SAlex Deucher break; 264c868d584SAlex Deucher case CHIP_FIJI: 265f0dc99a6SGraham Sider gfx_target_version = 80003; 266c868d584SAlex Deucher f2g = &gfx_v8_kfd2kgd; 267c868d584SAlex Deucher break; 268c868d584SAlex Deucher case CHIP_POLARIS10: 269f0dc99a6SGraham Sider gfx_target_version = 80003; 270c868d584SAlex Deucher f2g = &gfx_v8_kfd2kgd; 271c868d584SAlex Deucher break; 272c868d584SAlex Deucher case CHIP_POLARIS11: 273f0dc99a6SGraham Sider gfx_target_version = 80003; 274f0dc99a6SGraham Sider if (!vf) 275c868d584SAlex Deucher f2g = &gfx_v8_kfd2kgd; 276c868d584SAlex Deucher break; 277c868d584SAlex Deucher case CHIP_POLARIS12: 278f0dc99a6SGraham Sider gfx_target_version = 80003; 279f0dc99a6SGraham Sider if (!vf) 280c868d584SAlex Deucher f2g = &gfx_v8_kfd2kgd; 281c868d584SAlex Deucher break; 282c868d584SAlex Deucher case CHIP_VEGAM: 283f0dc99a6SGraham Sider gfx_target_version = 80003; 284f0dc99a6SGraham Sider if (!vf) 285c868d584SAlex Deucher f2g = &gfx_v8_kfd2kgd; 286c868d584SAlex Deucher break; 287c868d584SAlex Deucher default: 288c868d584SAlex Deucher switch (adev->ip_versions[GC_HWIP][0]) { 2892c1f19b3SGraham Sider /* Vega 10 */ 290c868d584SAlex Deucher case IP_VERSION(9, 0, 1): 291f0dc99a6SGraham Sider gfx_target_version = 90000; 292c868d584SAlex Deucher f2g = &gfx_v9_kfd2kgd; 293c868d584SAlex Deucher break; 294c868d584SAlex Deucher #ifdef KFD_SUPPORT_IOMMU_V2 2952c1f19b3SGraham Sider /* Raven */ 296c868d584SAlex Deucher case IP_VERSION(9, 1, 0): 297c868d584SAlex Deucher case IP_VERSION(9, 2, 2): 298f0dc99a6SGraham Sider gfx_target_version = 90002; 299f0dc99a6SGraham Sider if (!vf) 300c868d584SAlex Deucher f2g = &gfx_v9_kfd2kgd; 301c868d584SAlex Deucher break; 302c868d584SAlex Deucher #endif 3032c1f19b3SGraham Sider /* Vega12 */ 304c868d584SAlex Deucher case IP_VERSION(9, 2, 1): 305f0dc99a6SGraham Sider gfx_target_version = 90004; 306f0dc99a6SGraham Sider if (!vf) 307c868d584SAlex Deucher f2g = &gfx_v9_kfd2kgd; 308c868d584SAlex Deucher break; 3092c1f19b3SGraham Sider /* Renoir */ 310c868d584SAlex Deucher case IP_VERSION(9, 3, 0): 311f0dc99a6SGraham Sider gfx_target_version = 90012; 312f0dc99a6SGraham Sider if (!vf) 313c868d584SAlex Deucher f2g = &gfx_v9_kfd2kgd; 314c868d584SAlex Deucher break; 3152c1f19b3SGraham Sider /* Vega20 */ 316c868d584SAlex Deucher case IP_VERSION(9, 4, 0): 317f0dc99a6SGraham Sider gfx_target_version = 90006; 318f0dc99a6SGraham Sider if (!vf) 319c868d584SAlex Deucher f2g = &gfx_v9_kfd2kgd; 320c868d584SAlex Deucher break; 3212c1f19b3SGraham Sider /* Arcturus */ 322c868d584SAlex Deucher case IP_VERSION(9, 4, 1): 323f0dc99a6SGraham Sider gfx_target_version = 90008; 324c868d584SAlex Deucher f2g = &arcturus_kfd2kgd; 325c868d584SAlex Deucher break; 3262c1f19b3SGraham Sider /* Aldebaran */ 327c868d584SAlex Deucher case IP_VERSION(9, 4, 2): 328f0dc99a6SGraham Sider gfx_target_version = 90010; 329c868d584SAlex Deucher f2g = &aldebaran_kfd2kgd; 330c868d584SAlex Deucher break; 3312c1f19b3SGraham Sider /* Navi10 */ 332c868d584SAlex Deucher case IP_VERSION(10, 1, 10): 333f0dc99a6SGraham Sider gfx_target_version = 100100; 334f0dc99a6SGraham Sider if (!vf) 335c868d584SAlex Deucher f2g = &gfx_v10_kfd2kgd; 336c868d584SAlex Deucher break; 3372c1f19b3SGraham Sider /* Navi12 */ 338c868d584SAlex Deucher case IP_VERSION(10, 1, 2): 339f0dc99a6SGraham Sider gfx_target_version = 100101; 340c868d584SAlex Deucher f2g = &gfx_v10_kfd2kgd; 341c868d584SAlex Deucher break; 3422c1f19b3SGraham Sider /* Navi14 */ 343c868d584SAlex Deucher case IP_VERSION(10, 1, 1): 344f0dc99a6SGraham Sider gfx_target_version = 100102; 345f0dc99a6SGraham Sider if (!vf) 346c868d584SAlex Deucher f2g = &gfx_v10_kfd2kgd; 347c868d584SAlex Deucher break; 3482c1f19b3SGraham Sider /* Cyan Skillfish */ 349c868d584SAlex Deucher case IP_VERSION(10, 1, 3): 350f9ed188dSLang Yu case IP_VERSION(10, 1, 4): 351f0dc99a6SGraham Sider gfx_target_version = 100103; 352f0dc99a6SGraham Sider if (!vf) 353c868d584SAlex Deucher f2g = &gfx_v10_kfd2kgd; 354c868d584SAlex Deucher break; 3552c1f19b3SGraham Sider /* Sienna Cichlid */ 356c868d584SAlex Deucher case IP_VERSION(10, 3, 0): 357f0dc99a6SGraham Sider gfx_target_version = 100300; 358c868d584SAlex Deucher f2g = &gfx_v10_3_kfd2kgd; 359c868d584SAlex Deucher break; 3602c1f19b3SGraham Sider /* Navy Flounder */ 361c868d584SAlex Deucher case IP_VERSION(10, 3, 2): 362f0dc99a6SGraham Sider gfx_target_version = 100301; 363c868d584SAlex Deucher f2g = &gfx_v10_3_kfd2kgd; 364c868d584SAlex Deucher break; 3652c1f19b3SGraham Sider /* Van Gogh */ 366c868d584SAlex Deucher case IP_VERSION(10, 3, 1): 367f0dc99a6SGraham Sider gfx_target_version = 100303; 368f0dc99a6SGraham Sider if (!vf) 369c868d584SAlex Deucher f2g = &gfx_v10_3_kfd2kgd; 370c868d584SAlex Deucher break; 3712c1f19b3SGraham Sider /* Dimgrey Cavefish */ 372c868d584SAlex Deucher case IP_VERSION(10, 3, 4): 373f0dc99a6SGraham Sider gfx_target_version = 100302; 374c868d584SAlex Deucher f2g = &gfx_v10_3_kfd2kgd; 375c868d584SAlex Deucher break; 3762c1f19b3SGraham Sider /* Beige Goby */ 377c868d584SAlex Deucher case IP_VERSION(10, 3, 5): 378f0dc99a6SGraham Sider gfx_target_version = 100304; 379c868d584SAlex Deucher f2g = &gfx_v10_3_kfd2kgd; 380c868d584SAlex Deucher break; 3812c1f19b3SGraham Sider /* Yellow Carp */ 382c868d584SAlex Deucher case IP_VERSION(10, 3, 3): 383f0dc99a6SGraham Sider gfx_target_version = 100305; 384f0dc99a6SGraham Sider if (!vf) 385c868d584SAlex Deucher f2g = &gfx_v10_3_kfd2kgd; 386c868d584SAlex Deucher break; 3877c4f4f19SMario Limonciello case IP_VERSION(10, 3, 6): 3887c4f4f19SMario Limonciello case IP_VERSION(10, 3, 7): 3892724efa3SPrike Liang gfx_target_version = 100306; 3907c4f4f19SMario Limonciello if (!vf) 3917c4f4f19SMario Limonciello f2g = &gfx_v10_3_kfd2kgd; 3927c4f4f19SMario Limonciello break; 393cc009e61SMukul Joshi case IP_VERSION(11, 0, 0): 394cc009e61SMukul Joshi gfx_target_version = 110000; 395cc009e61SMukul Joshi f2g = &gfx_v11_kfd2kgd; 396cc009e61SMukul Joshi break; 39726776a70SHuang Rui case IP_VERSION(11, 0, 1): 398*88c21c2bSYifan Zhang case IP_VERSION(11, 0, 4): 39926776a70SHuang Rui gfx_target_version = 110003; 40026776a70SHuang Rui f2g = &gfx_v11_kfd2kgd; 40126776a70SHuang Rui break; 402ec661f1cSEric Huang case IP_VERSION(11, 0, 2): 403ec661f1cSEric Huang gfx_target_version = 110002; 404ec661f1cSEric Huang f2g = &gfx_v11_kfd2kgd; 405ec661f1cSEric Huang break; 4065ddb5fe9SDavid Belanger case IP_VERSION(11, 0, 3): 4075ddb5fe9SDavid Belanger /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */ 4085ddb5fe9SDavid Belanger gfx_target_version = 110001; 4095ddb5fe9SDavid Belanger f2g = &gfx_v11_kfd2kgd; 4105ddb5fe9SDavid Belanger break; 411c868d584SAlex Deucher default: 412f0dc99a6SGraham Sider break; 413050091abSYong Zhao } 414c868d584SAlex Deucher break; 415c868d584SAlex Deucher } 4164a488a7aSOded Gabbay 417f0dc99a6SGraham Sider if (!f2g) { 418e4804a39SGraham Sider if (adev->ip_versions[GC_HWIP][0]) 419e4804a39SGraham Sider dev_err(kfd_device, "GC IP %06x %s not supported in kfd\n", 420e4804a39SGraham Sider adev->ip_versions[GC_HWIP][0], vf ? "VF" : ""); 421e4804a39SGraham Sider else 422050091abSYong Zhao dev_err(kfd_device, "%s %s not supported in kfd\n", 423c868d584SAlex Deucher amdgpu_asic_name[adev->asic_type], vf ? "VF" : ""); 4244a488a7aSOded Gabbay return NULL; 4254ebc7182SYong Zhao } 4264a488a7aSOded Gabbay 427d35f00d8SEric Huang kfd = kzalloc(sizeof(*kfd), GFP_KERNEL); 428d35f00d8SEric Huang if (!kfd) 429d35f00d8SEric Huang return NULL; 430d35f00d8SEric Huang 431c6c57446SGraham Sider kfd->adev = adev; 432f0dc99a6SGraham Sider kfd_device_info_init(kfd, vf, gfx_target_version); 43319f6d2a6SOded Gabbay kfd->init_complete = false; 434cea405b1SXihan Zhang kfd->kfd2kgd = f2g; 43543d8107fSHarish Kasiviswanathan atomic_set(&kfd->compute_profile, 0); 436cea405b1SXihan Zhang 437cea405b1SXihan Zhang mutex_init(&kfd->doorbell_mutex); 438cea405b1SXihan Zhang memset(&kfd->doorbell_available_index, 0, 439cea405b1SXihan Zhang sizeof(kfd->doorbell_available_index)); 4404a488a7aSOded Gabbay 4419b54d201SEric Huang atomic_set(&kfd->sram_ecc_flag, 0); 4429b54d201SEric Huang 44359d7115dSMukul Joshi ida_init(&kfd->doorbell_ida); 44459d7115dSMukul Joshi 4454a488a7aSOded Gabbay return kfd; 4464a488a7aSOded Gabbay } 4474a488a7aSOded Gabbay 448373d7080SFelix Kuehling static void kfd_cwsr_init(struct kfd_dev *kfd) 449373d7080SFelix Kuehling { 450f0dc99a6SGraham Sider if (cwsr_enable && kfd->device_info.supports_cwsr) { 451046e674bSGraham Sider if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) { 452373d7080SFelix Kuehling BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE); 453373d7080SFelix Kuehling kfd->cwsr_isa = cwsr_trap_gfx8_hex; 454373d7080SFelix Kuehling kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex); 455046e674bSGraham Sider } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) { 4563baa24f0SOak Zeng BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE); 4573baa24f0SOak Zeng kfd->cwsr_isa = cwsr_trap_arcturus_hex; 4583baa24f0SOak Zeng kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex); 459046e674bSGraham Sider } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) { 4600ef6845cSJay Cornwall BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) > PAGE_SIZE); 4610ef6845cSJay Cornwall kfd->cwsr_isa = cwsr_trap_aldebaran_hex; 4620ef6845cSJay Cornwall kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex); 463046e674bSGraham Sider } else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) { 4643e76c239SFelix Kuehling BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE); 4653e76c239SFelix Kuehling kfd->cwsr_isa = cwsr_trap_gfx9_hex; 4663e76c239SFelix Kuehling kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex); 467046e674bSGraham Sider } else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) { 46880b6cfedSJay Cornwall BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE); 46980b6cfedSJay Cornwall kfd->cwsr_isa = cwsr_trap_nv1x_hex; 47080b6cfedSJay Cornwall kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex); 4716a817038SJay Cornwall } else if (KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)) { 47214328aa5SPhilip Cox BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex) > PAGE_SIZE); 47314328aa5SPhilip Cox kfd->cwsr_isa = cwsr_trap_gfx10_hex; 47414328aa5SPhilip Cox kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex); 4756a817038SJay Cornwall } else { 4766a817038SJay Cornwall BUILD_BUG_ON(sizeof(cwsr_trap_gfx11_hex) > PAGE_SIZE); 4776a817038SJay Cornwall kfd->cwsr_isa = cwsr_trap_gfx11_hex; 4786a817038SJay Cornwall kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex); 4793e76c239SFelix Kuehling } 4803e76c239SFelix Kuehling 481373d7080SFelix Kuehling kfd->cwsr_enabled = true; 482373d7080SFelix Kuehling } 483373d7080SFelix Kuehling } 484373d7080SFelix Kuehling 48529633d0eSJoseph Greathouse static int kfd_gws_init(struct kfd_dev *kfd) 48629633d0eSJoseph Greathouse { 48729633d0eSJoseph Greathouse int ret = 0; 48829633d0eSJoseph Greathouse 48929633d0eSJoseph Greathouse if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) 49029633d0eSJoseph Greathouse return 0; 49129633d0eSJoseph Greathouse 492046e674bSGraham Sider if (hws_gws_support || (KFD_IS_SOC15(kfd) && 493046e674bSGraham Sider ((KFD_GC_VERSION(kfd) == IP_VERSION(9, 0, 1) 494046e674bSGraham Sider && kfd->mec2_fw_version >= 0x81b3) || 495046e674bSGraham Sider (KFD_GC_VERSION(kfd) <= IP_VERSION(9, 4, 0) 496046e674bSGraham Sider && kfd->mec2_fw_version >= 0x1b3) || 497046e674bSGraham Sider (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1) 498046e674bSGraham Sider && kfd->mec2_fw_version >= 0x30) || 499046e674bSGraham Sider (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) 500beb15bc1SJonathan Kim && kfd->mec2_fw_version >= 0x28) || 501beb15bc1SJonathan Kim (KFD_GC_VERSION(kfd) >= IP_VERSION(10, 3, 0) 502beb15bc1SJonathan Kim && KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0) 503beb15bc1SJonathan Kim && kfd->mec2_fw_version >= 0x6b)))) 5046bfc7c7eSGraham Sider ret = amdgpu_amdkfd_alloc_gws(kfd->adev, 50502274fc0SGraham Sider kfd->adev->gds.gws_size, &kfd->gws); 50629633d0eSJoseph Greathouse 50729633d0eSJoseph Greathouse return ret; 50829633d0eSJoseph Greathouse } 50929633d0eSJoseph Greathouse 5102243f493SRajneesh Bhardwaj static void kfd_smi_init(struct kfd_dev *dev) 5112243f493SRajneesh Bhardwaj { 512938a0650SAmber Lin INIT_LIST_HEAD(&dev->smi_clients); 513938a0650SAmber Lin spin_lock_init(&dev->smi_lock); 514938a0650SAmber Lin } 515938a0650SAmber Lin 5164a488a7aSOded Gabbay bool kgd2kfd_device_init(struct kfd_dev *kfd, 5174a488a7aSOded Gabbay const struct kgd2kfd_shared_resources *gpu_resources) 5184a488a7aSOded Gabbay { 519fd6a440eSJonathan Kim unsigned int size, map_process_packet_size; 52019f6d2a6SOded Gabbay 521574c4183SGraham Sider kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, 5225ade6c9cSFelix Kuehling KGD_ENGINE_MEC1); 523574c4183SGraham Sider kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, 52429633d0eSJoseph Greathouse KGD_ENGINE_MEC2); 525574c4183SGraham Sider kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, 5265ade6c9cSFelix Kuehling KGD_ENGINE_SDMA1); 5274a488a7aSOded Gabbay kfd->shared_resources = *gpu_resources; 5284a488a7aSOded Gabbay 52944008d7aSYong Zhao kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1; 53044008d7aSYong Zhao kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1; 53144008d7aSYong Zhao kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd 53244008d7aSYong Zhao - kfd->vm_info.first_vmid_kfd + 1; 53344008d7aSYong Zhao 534e312af6cSFelix Kuehling /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps. 535e312af6cSFelix Kuehling * 32 and 64-bit requests are possible and must be 536e312af6cSFelix Kuehling * supported. 537e312af6cSFelix Kuehling */ 5386bfc7c7eSGraham Sider kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev); 539e312af6cSFelix Kuehling if (!kfd->pci_atomic_requested && 540f0dc99a6SGraham Sider kfd->device_info.needs_pci_atomics && 541f0dc99a6SGraham Sider (!kfd->device_info.no_atomic_fw_version || 542f0dc99a6SGraham Sider kfd->mec_fw_version < kfd->device_info.no_atomic_fw_version)) { 543e312af6cSFelix Kuehling dev_info(kfd_device, 544e312af6cSFelix Kuehling "skipped device %x:%x, PCI rejects atomics %d<%d\n", 545d69a3b76SMukul Joshi kfd->adev->pdev->vendor, kfd->adev->pdev->device, 546e312af6cSFelix Kuehling kfd->mec_fw_version, 547f0dc99a6SGraham Sider kfd->device_info.no_atomic_fw_version); 548e312af6cSFelix Kuehling return false; 549e312af6cSFelix Kuehling } 550e312af6cSFelix Kuehling 551a99c6d4fSFelix Kuehling /* Verify module parameters regarding mapped process number*/ 552b7dfbd2eSTushar Patel if (hws_max_conc_proc >= 0) 553b7dfbd2eSTushar Patel kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd); 554b7dfbd2eSTushar Patel else 555a99c6d4fSFelix Kuehling kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd; 556a99c6d4fSFelix Kuehling 55719f6d2a6SOded Gabbay /* calculate max size of mqds needed for queues */ 558b8cbab04SOded Gabbay size = max_num_of_queues_per_device * 559f0dc99a6SGraham Sider kfd->device_info.mqd_size_aligned; 56019f6d2a6SOded Gabbay 561e18e794eSOded Gabbay /* 562e18e794eSOded Gabbay * calculate max size of runlist packet. 563e18e794eSOded Gabbay * There can be only 2 packets at once 564e18e794eSOded Gabbay */ 565046e674bSGraham Sider map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ? 566fd6a440eSJonathan Kim sizeof(struct pm4_mes_map_process_aldebaran) : 567fd6a440eSJonathan Kim sizeof(struct pm4_mes_map_process); 568fd6a440eSJonathan Kim size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size + 569507968ddSFelix Kuehling max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues) 570507968ddSFelix Kuehling + sizeof(struct pm4_mes_runlist)) * 2; 571e18e794eSOded Gabbay 572e18e794eSOded Gabbay /* Add size of HIQ & DIQ */ 573e18e794eSOded Gabbay size += KFD_KERNEL_QUEUE_SIZE * 2; 574e18e794eSOded Gabbay 575e18e794eSOded Gabbay /* add another 512KB for all other allocations on gart (HPD, fences) */ 57619f6d2a6SOded Gabbay size += 512 * 1024; 57719f6d2a6SOded Gabbay 5787cd52c91SAmber Lin if (amdgpu_amdkfd_alloc_gtt_mem( 5796bfc7c7eSGraham Sider kfd->adev, size, &kfd->gtt_mem, 58015426dbbSYong Zhao &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr, 58115426dbbSYong Zhao false)) { 58279775b62SKent Russell dev_err(kfd_device, "Could not allocate %d bytes\n", size); 583e09d4fc8SOak Zeng goto alloc_gtt_mem_failure; 58419f6d2a6SOded Gabbay } 58519f6d2a6SOded Gabbay 58679775b62SKent Russell dev_info(kfd_device, "Allocated %d bytes on gart\n", size); 587e18e794eSOded Gabbay 58873a1da0bSOded Gabbay /* Initialize GTT sa with 512 byte chunk size */ 58973a1da0bSOded Gabbay if (kfd_gtt_sa_init(kfd, size, 512) != 0) { 59079775b62SKent Russell dev_err(kfd_device, "Error initializing gtt sub-allocator\n"); 59173a1da0bSOded Gabbay goto kfd_gtt_sa_init_error; 59273a1da0bSOded Gabbay } 59373a1da0bSOded Gabbay 594735df2baSFelix Kuehling if (kfd_doorbell_init(kfd)) { 595735df2baSFelix Kuehling dev_err(kfd_device, 596735df2baSFelix Kuehling "Error initializing doorbell aperture\n"); 597735df2baSFelix Kuehling goto kfd_doorbell_error; 598735df2baSFelix Kuehling } 59919f6d2a6SOded Gabbay 600c5650327SDivya Shikre if (amdgpu_use_xgmi_p2p) 60102274fc0SGraham Sider kfd->hive_id = kfd->adev->gmc.xgmi.hive_id; 6020c1690e3SShaoyun Liu 60302274fc0SGraham Sider kfd->noretry = kfd->adev->gmc.noretry; 6049b498efaSAlex Deucher 6052249d558SAndrew Lewycky if (kfd_interrupt_init(kfd)) { 60679775b62SKent Russell dev_err(kfd_device, "Error initializing interrupts\n"); 6072249d558SAndrew Lewycky goto kfd_interrupt_error; 6082249d558SAndrew Lewycky } 6092249d558SAndrew Lewycky 61064c7f8cfSBen Goz kfd->dqm = device_queue_manager_init(kfd); 61164c7f8cfSBen Goz if (!kfd->dqm) { 61279775b62SKent Russell dev_err(kfd_device, "Error initializing queue manager\n"); 61364c7f8cfSBen Goz goto device_queue_manager_error; 61464c7f8cfSBen Goz } 61564c7f8cfSBen Goz 61629633d0eSJoseph Greathouse /* If supported on this device, allocate global GWS that is shared 61729633d0eSJoseph Greathouse * by all KFD processes 61829633d0eSJoseph Greathouse */ 61929633d0eSJoseph Greathouse if (kfd_gws_init(kfd)) { 62029633d0eSJoseph Greathouse dev_err(kfd_device, "Could not allocate %d gws\n", 62102274fc0SGraham Sider kfd->adev->gds.gws_size); 62229633d0eSJoseph Greathouse goto gws_error; 62329633d0eSJoseph Greathouse } 62429633d0eSJoseph Greathouse 6256127896fSHuang Rui /* If CRAT is broken, won't set iommu enabled */ 6266127896fSHuang Rui kfd_double_confirm_iommu_support(kfd); 6276127896fSHuang Rui 62864d1c3a4SFelix Kuehling if (kfd_iommu_device_init(kfd)) { 6296f4b590aSYifan Zhang kfd->use_iommu_v2 = false; 63064d1c3a4SFelix Kuehling dev_err(kfd_device, "Error initializing iommuv2\n"); 63164d1c3a4SFelix Kuehling goto device_iommu_error; 63264c7f8cfSBen Goz } 63364c7f8cfSBen Goz 634373d7080SFelix Kuehling kfd_cwsr_init(kfd); 635373d7080SFelix Kuehling 63656c5977eSGraham Sider svm_migrate_init(kfd->adev); 637814ab993SPhilip Yang 638afd18180SYifan Zhang if (kgd2kfd_resume_iommu(kfd)) 639afd18180SYifan Zhang goto device_iommu_error; 640afd18180SYifan Zhang 641b8935a7cSYong Zhao if (kfd_resume(kfd)) 642b8935a7cSYong Zhao goto kfd_resume_error; 643b8935a7cSYong Zhao 644b179fc28SMukul Joshi amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info); 645b179fc28SMukul Joshi 646465ab9e0SOak Zeng if (kfd_topology_add_device(kfd)) { 647465ab9e0SOak Zeng dev_err(kfd_device, "Error adding device to topology\n"); 648465ab9e0SOak Zeng goto kfd_topology_add_device_error; 649465ab9e0SOak Zeng } 650465ab9e0SOak Zeng 651938a0650SAmber Lin kfd_smi_init(kfd); 652938a0650SAmber Lin 6534a488a7aSOded Gabbay kfd->init_complete = true; 654d69a3b76SMukul Joshi dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor, 655d69a3b76SMukul Joshi kfd->adev->pdev->device); 6564a488a7aSOded Gabbay 65779775b62SKent Russell pr_debug("Starting kfd with the following scheduling policy %d\n", 658d146c5a7SFelix Kuehling kfd->dqm->sched_policy); 65964c7f8cfSBen Goz 66019f6d2a6SOded Gabbay goto out; 66119f6d2a6SOded Gabbay 662465ab9e0SOak Zeng kfd_topology_add_device_error: 663b8935a7cSYong Zhao kfd_resume_error: 66464d1c3a4SFelix Kuehling device_iommu_error: 66529633d0eSJoseph Greathouse gws_error: 66664c7f8cfSBen Goz device_queue_manager_uninit(kfd->dqm); 66764c7f8cfSBen Goz device_queue_manager_error: 6682249d558SAndrew Lewycky kfd_interrupt_exit(kfd); 6692249d558SAndrew Lewycky kfd_interrupt_error: 670735df2baSFelix Kuehling kfd_doorbell_fini(kfd); 671735df2baSFelix Kuehling kfd_doorbell_error: 67273a1da0bSOded Gabbay kfd_gtt_sa_fini(kfd); 67373a1da0bSOded Gabbay kfd_gtt_sa_init_error: 6746bfc7c7eSGraham Sider amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); 675e09d4fc8SOak Zeng alloc_gtt_mem_failure: 67629633d0eSJoseph Greathouse if (kfd->gws) 6776bfc7c7eSGraham Sider amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws); 67819f6d2a6SOded Gabbay dev_err(kfd_device, 67979775b62SKent Russell "device %x:%x NOT added due to errors\n", 680d69a3b76SMukul Joshi kfd->adev->pdev->vendor, kfd->adev->pdev->device); 68119f6d2a6SOded Gabbay out: 68219f6d2a6SOded Gabbay return kfd->init_complete; 6834a488a7aSOded Gabbay } 6844a488a7aSOded Gabbay 6854a488a7aSOded Gabbay void kgd2kfd_device_exit(struct kfd_dev *kfd) 6864a488a7aSOded Gabbay { 687b17f068aSOded Gabbay if (kfd->init_complete) { 68864c7f8cfSBen Goz device_queue_manager_uninit(kfd->dqm); 6892249d558SAndrew Lewycky kfd_interrupt_exit(kfd); 69019f6d2a6SOded Gabbay kfd_topology_remove_device(kfd); 691735df2baSFelix Kuehling kfd_doorbell_fini(kfd); 69259d7115dSMukul Joshi ida_destroy(&kfd->doorbell_ida); 69373a1da0bSOded Gabbay kfd_gtt_sa_fini(kfd); 6946bfc7c7eSGraham Sider amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); 69529633d0eSJoseph Greathouse if (kfd->gws) 6966bfc7c7eSGraham Sider amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws); 697b17f068aSOded Gabbay } 6985b5c4e40SEvgeny Pinchuk 6994a488a7aSOded Gabbay kfree(kfd); 7004a488a7aSOded Gabbay } 7014a488a7aSOded Gabbay 702e3b7a967SShaoyun Liu int kgd2kfd_pre_reset(struct kfd_dev *kfd) 703e3b7a967SShaoyun Liu { 704e42051d2SShaoyun Liu if (!kfd->init_complete) 705e42051d2SShaoyun Liu return 0; 70609c34e8dSFelix Kuehling 70755977744SMukul Joshi kfd_smi_event_update_gpu_reset(kfd, false); 70855977744SMukul Joshi 70909c34e8dSFelix Kuehling kfd->dqm->ops.pre_reset(kfd->dqm); 71009c34e8dSFelix Kuehling 7119593f4d6SRajneesh Bhardwaj kgd2kfd_suspend(kfd, false); 712e42051d2SShaoyun Liu 713e42051d2SShaoyun Liu kfd_signal_reset_event(kfd); 714e3b7a967SShaoyun Liu return 0; 715e3b7a967SShaoyun Liu } 716e3b7a967SShaoyun Liu 717e42051d2SShaoyun Liu /* 718e42051d2SShaoyun Liu * Fix me. KFD won't be able to resume existing process for now. 719e42051d2SShaoyun Liu * We will keep all existing process in a evicted state and 720e42051d2SShaoyun Liu * wait the process to be terminated. 721e42051d2SShaoyun Liu */ 722e42051d2SShaoyun Liu 723e3b7a967SShaoyun Liu int kgd2kfd_post_reset(struct kfd_dev *kfd) 724e3b7a967SShaoyun Liu { 725a1bd079fSyu kuai int ret; 726e42051d2SShaoyun Liu 727e42051d2SShaoyun Liu if (!kfd->init_complete) 728e3b7a967SShaoyun Liu return 0; 729e42051d2SShaoyun Liu 730e42051d2SShaoyun Liu ret = kfd_resume(kfd); 731e42051d2SShaoyun Liu if (ret) 732e42051d2SShaoyun Liu return ret; 733a1bd079fSyu kuai atomic_dec(&kfd_locked); 7349b54d201SEric Huang 7359b54d201SEric Huang atomic_set(&kfd->sram_ecc_flag, 0); 7369b54d201SEric Huang 73755977744SMukul Joshi kfd_smi_event_update_gpu_reset(kfd, true); 73855977744SMukul Joshi 739e42051d2SShaoyun Liu return 0; 740e42051d2SShaoyun Liu } 741e42051d2SShaoyun Liu 742e42051d2SShaoyun Liu bool kfd_is_locked(void) 743e42051d2SShaoyun Liu { 744e42051d2SShaoyun Liu return (atomic_read(&kfd_locked) > 0); 745e3b7a967SShaoyun Liu } 746e3b7a967SShaoyun Liu 7479593f4d6SRajneesh Bhardwaj void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) 7484a488a7aSOded Gabbay { 749733fa1f7SYong Zhao if (!kfd->init_complete) 750733fa1f7SYong Zhao return; 751733fa1f7SYong Zhao 7529593f4d6SRajneesh Bhardwaj /* for runtime suspend, skip locking kfd */ 7539593f4d6SRajneesh Bhardwaj if (!run_pm) { 75426103436SFelix Kuehling /* For first KFD device suspend all the KFD processes */ 755e42051d2SShaoyun Liu if (atomic_inc_return(&kfd_locked) == 1) 75626103436SFelix Kuehling kfd_suspend_all_processes(); 7579593f4d6SRajneesh Bhardwaj } 75826103436SFelix Kuehling 75945c9a5e4SOded Gabbay kfd->dqm->ops.stop(kfd->dqm); 76064d1c3a4SFelix Kuehling kfd_iommu_suspend(kfd); 7614a488a7aSOded Gabbay } 7624a488a7aSOded Gabbay 7639593f4d6SRajneesh Bhardwaj int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) 7644a488a7aSOded Gabbay { 76526103436SFelix Kuehling int ret, count; 76626103436SFelix Kuehling 767b8935a7cSYong Zhao if (!kfd->init_complete) 768b8935a7cSYong Zhao return 0; 769b17f068aSOded Gabbay 77026103436SFelix Kuehling ret = kfd_resume(kfd); 77126103436SFelix Kuehling if (ret) 77226103436SFelix Kuehling return ret; 773b17f068aSOded Gabbay 7749593f4d6SRajneesh Bhardwaj /* for runtime resume, skip unlocking kfd */ 7759593f4d6SRajneesh Bhardwaj if (!run_pm) { 776e42051d2SShaoyun Liu count = atomic_dec_return(&kfd_locked); 77726103436SFelix Kuehling WARN_ONCE(count < 0, "KFD suspend / resume ref. error"); 77826103436SFelix Kuehling if (count == 0) 77926103436SFelix Kuehling ret = kfd_resume_all_processes(); 7809593f4d6SRajneesh Bhardwaj } 78126103436SFelix Kuehling 78226103436SFelix Kuehling return ret; 7834ebc7182SYong Zhao } 7844ebc7182SYong Zhao 785f8846323SJames Zhu int kgd2kfd_resume_iommu(struct kfd_dev *kfd) 786b8935a7cSYong Zhao { 787b8935a7cSYong Zhao int err = 0; 788b8935a7cSYong Zhao 78964d1c3a4SFelix Kuehling err = kfd_iommu_resume(kfd); 790f8846323SJames Zhu if (err) 79164d1c3a4SFelix Kuehling dev_err(kfd_device, 79264d1c3a4SFelix Kuehling "Failed to resume IOMMU for device %x:%x\n", 793d69a3b76SMukul Joshi kfd->adev->pdev->vendor, kfd->adev->pdev->device); 79464d1c3a4SFelix Kuehling return err; 79564d1c3a4SFelix Kuehling } 796733fa1f7SYong Zhao 797f8846323SJames Zhu static int kfd_resume(struct kfd_dev *kfd) 798f8846323SJames Zhu { 799f8846323SJames Zhu int err = 0; 800f8846323SJames Zhu 801b8935a7cSYong Zhao err = kfd->dqm->ops.start(kfd->dqm); 802499f4d38SYifan Zhang if (err) 803b8935a7cSYong Zhao dev_err(kfd_device, 804b8935a7cSYong Zhao "Error starting queue manager for device %x:%x\n", 805d69a3b76SMukul Joshi kfd->adev->pdev->vendor, kfd->adev->pdev->device); 806b17f068aSOded Gabbay 807b8935a7cSYong Zhao return err; 8084a488a7aSOded Gabbay } 8094a488a7aSOded Gabbay 810b3eca59dSPhilip Yang static inline void kfd_queue_work(struct workqueue_struct *wq, 811b3eca59dSPhilip Yang struct work_struct *work) 812b3eca59dSPhilip Yang { 813b3eca59dSPhilip Yang int cpu, new_cpu; 814b3eca59dSPhilip Yang 815b3eca59dSPhilip Yang cpu = new_cpu = smp_processor_id(); 816b3eca59dSPhilip Yang do { 817b3eca59dSPhilip Yang new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids; 818b3eca59dSPhilip Yang if (cpu_to_node(new_cpu) == numa_node_id()) 819b3eca59dSPhilip Yang break; 820b3eca59dSPhilip Yang } while (cpu != new_cpu); 821b3eca59dSPhilip Yang 822b3eca59dSPhilip Yang queue_work_on(new_cpu, wq, work); 823b3eca59dSPhilip Yang } 824b3eca59dSPhilip Yang 825b3f5e6b4SAndrew Lewycky /* This is called directly from KGD at ISR. */ 826b3f5e6b4SAndrew Lewycky void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) 8274a488a7aSOded Gabbay { 82858e69886SLan Xiao uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE]; 82958e69886SLan Xiao bool is_patched = false; 8302383a767SChristian König unsigned long flags; 83158e69886SLan Xiao 8322249d558SAndrew Lewycky if (!kfd->init_complete) 8332249d558SAndrew Lewycky return; 8342249d558SAndrew Lewycky 835f0dc99a6SGraham Sider if (kfd->device_info.ih_ring_entry_size > sizeof(patched_ihre)) { 83658e69886SLan Xiao dev_err_once(kfd_device, "Ring entry too small\n"); 83758e69886SLan Xiao return; 83858e69886SLan Xiao } 83958e69886SLan Xiao 8402383a767SChristian König spin_lock_irqsave(&kfd->interrupt_lock, flags); 8412249d558SAndrew Lewycky 8422249d558SAndrew Lewycky if (kfd->interrupts_active 84358e69886SLan Xiao && interrupt_is_wanted(kfd, ih_ring_entry, 84458e69886SLan Xiao patched_ihre, &is_patched) 84558e69886SLan Xiao && enqueue_ih_ring_entry(kfd, 84658e69886SLan Xiao is_patched ? patched_ihre : ih_ring_entry)) 847b3eca59dSPhilip Yang kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work); 8482249d558SAndrew Lewycky 8492383a767SChristian König spin_unlock_irqrestore(&kfd->interrupt_lock, flags); 8504a488a7aSOded Gabbay } 8516e81090bSOded Gabbay 852c7f21978SPhilip Yang int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger) 8536b95e797SFelix Kuehling { 8546b95e797SFelix Kuehling struct kfd_process *p; 8556b95e797SFelix Kuehling int r; 8566b95e797SFelix Kuehling 8576b95e797SFelix Kuehling /* Because we are called from arbitrary context (workqueue) as opposed 8586b95e797SFelix Kuehling * to process context, kfd_process could attempt to exit while we are 8596b95e797SFelix Kuehling * running so the lookup function increments the process ref count. 8606b95e797SFelix Kuehling */ 8616b95e797SFelix Kuehling p = kfd_lookup_process_by_mm(mm); 8626b95e797SFelix Kuehling if (!p) 8636b95e797SFelix Kuehling return -ESRCH; 8646b95e797SFelix Kuehling 865b2057956SFelix Kuehling WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid); 866c7f21978SPhilip Yang r = kfd_process_evict_queues(p, trigger); 8676b95e797SFelix Kuehling 8686b95e797SFelix Kuehling kfd_unref_process(p); 8696b95e797SFelix Kuehling return r; 8706b95e797SFelix Kuehling } 8716b95e797SFelix Kuehling 8726b95e797SFelix Kuehling int kgd2kfd_resume_mm(struct mm_struct *mm) 8736b95e797SFelix Kuehling { 8746b95e797SFelix Kuehling struct kfd_process *p; 8756b95e797SFelix Kuehling int r; 8766b95e797SFelix Kuehling 8776b95e797SFelix Kuehling /* Because we are called from arbitrary context (workqueue) as opposed 8786b95e797SFelix Kuehling * to process context, kfd_process could attempt to exit while we are 8796b95e797SFelix Kuehling * running so the lookup function increments the process ref count. 8806b95e797SFelix Kuehling */ 8816b95e797SFelix Kuehling p = kfd_lookup_process_by_mm(mm); 8826b95e797SFelix Kuehling if (!p) 8836b95e797SFelix Kuehling return -ESRCH; 8846b95e797SFelix Kuehling 8856b95e797SFelix Kuehling r = kfd_process_restore_queues(p); 8866b95e797SFelix Kuehling 8876b95e797SFelix Kuehling kfd_unref_process(p); 8886b95e797SFelix Kuehling return r; 8896b95e797SFelix Kuehling } 8906b95e797SFelix Kuehling 89126103436SFelix Kuehling /** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will 89226103436SFelix Kuehling * prepare for safe eviction of KFD BOs that belong to the specified 89326103436SFelix Kuehling * process. 89426103436SFelix Kuehling * 89526103436SFelix Kuehling * @mm: mm_struct that identifies the specified KFD process 89626103436SFelix Kuehling * @fence: eviction fence attached to KFD process BOs 89726103436SFelix Kuehling * 89826103436SFelix Kuehling */ 89926103436SFelix Kuehling int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm, 90026103436SFelix Kuehling struct dma_fence *fence) 90126103436SFelix Kuehling { 90226103436SFelix Kuehling struct kfd_process *p; 90326103436SFelix Kuehling unsigned long active_time; 90426103436SFelix Kuehling unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS); 90526103436SFelix Kuehling 90626103436SFelix Kuehling if (!fence) 90726103436SFelix Kuehling return -EINVAL; 90826103436SFelix Kuehling 90926103436SFelix Kuehling if (dma_fence_is_signaled(fence)) 91026103436SFelix Kuehling return 0; 91126103436SFelix Kuehling 91226103436SFelix Kuehling p = kfd_lookup_process_by_mm(mm); 91326103436SFelix Kuehling if (!p) 91426103436SFelix Kuehling return -ENODEV; 91526103436SFelix Kuehling 91626103436SFelix Kuehling if (fence->seqno == p->last_eviction_seqno) 91726103436SFelix Kuehling goto out; 91826103436SFelix Kuehling 91926103436SFelix Kuehling p->last_eviction_seqno = fence->seqno; 92026103436SFelix Kuehling 92126103436SFelix Kuehling /* Avoid KFD process starvation. Wait for at least 92226103436SFelix Kuehling * PROCESS_ACTIVE_TIME_MS before evicting the process again 92326103436SFelix Kuehling */ 92426103436SFelix Kuehling active_time = get_jiffies_64() - p->last_restore_timestamp; 92526103436SFelix Kuehling if (delay_jiffies > active_time) 92626103436SFelix Kuehling delay_jiffies -= active_time; 92726103436SFelix Kuehling else 92826103436SFelix Kuehling delay_jiffies = 0; 92926103436SFelix Kuehling 93026103436SFelix Kuehling /* During process initialization eviction_work.dwork is initialized 93126103436SFelix Kuehling * to kfd_evict_bo_worker 93226103436SFelix Kuehling */ 933b2057956SFelix Kuehling WARN(debug_evictions, "Scheduling eviction of pid %d in %ld jiffies", 934b2057956SFelix Kuehling p->lead_thread->pid, delay_jiffies); 93526103436SFelix Kuehling schedule_delayed_work(&p->eviction_work, delay_jiffies); 93626103436SFelix Kuehling out: 93726103436SFelix Kuehling kfd_unref_process(p); 93826103436SFelix Kuehling return 0; 93926103436SFelix Kuehling } 94026103436SFelix Kuehling 9416e81090bSOded Gabbay static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, 9426e81090bSOded Gabbay unsigned int chunk_size) 9436e81090bSOded Gabbay { 94432fa8219SFelix Kuehling if (WARN_ON(buf_size < chunk_size)) 94532fa8219SFelix Kuehling return -EINVAL; 94632fa8219SFelix Kuehling if (WARN_ON(buf_size == 0)) 94732fa8219SFelix Kuehling return -EINVAL; 94832fa8219SFelix Kuehling if (WARN_ON(chunk_size == 0)) 94932fa8219SFelix Kuehling return -EINVAL; 9506e81090bSOded Gabbay 9516e81090bSOded Gabbay kfd->gtt_sa_chunk_size = chunk_size; 9526e81090bSOded Gabbay kfd->gtt_sa_num_of_chunks = buf_size / chunk_size; 9536e81090bSOded Gabbay 954f43a9f18SChristophe JAILLET kfd->gtt_sa_bitmap = bitmap_zalloc(kfd->gtt_sa_num_of_chunks, 955f43a9f18SChristophe JAILLET GFP_KERNEL); 9566e81090bSOded Gabbay if (!kfd->gtt_sa_bitmap) 9576e81090bSOded Gabbay return -ENOMEM; 9586e81090bSOded Gabbay 95979775b62SKent Russell pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n", 9606e81090bSOded Gabbay kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap); 9616e81090bSOded Gabbay 9626e81090bSOded Gabbay mutex_init(&kfd->gtt_sa_lock); 9636e81090bSOded Gabbay 9646e81090bSOded Gabbay return 0; 9656e81090bSOded Gabbay } 9666e81090bSOded Gabbay 9676e81090bSOded Gabbay static void kfd_gtt_sa_fini(struct kfd_dev *kfd) 9686e81090bSOded Gabbay { 9696e81090bSOded Gabbay mutex_destroy(&kfd->gtt_sa_lock); 970f43a9f18SChristophe JAILLET bitmap_free(kfd->gtt_sa_bitmap); 9716e81090bSOded Gabbay } 9726e81090bSOded Gabbay 9736e81090bSOded Gabbay static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr, 9746e81090bSOded Gabbay unsigned int bit_num, 9756e81090bSOded Gabbay unsigned int chunk_size) 9766e81090bSOded Gabbay { 9776e81090bSOded Gabbay return start_addr + bit_num * chunk_size; 9786e81090bSOded Gabbay } 9796e81090bSOded Gabbay 9806e81090bSOded Gabbay static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr, 9816e81090bSOded Gabbay unsigned int bit_num, 9826e81090bSOded Gabbay unsigned int chunk_size) 9836e81090bSOded Gabbay { 9846e81090bSOded Gabbay return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size); 9856e81090bSOded Gabbay } 9866e81090bSOded Gabbay 9876e81090bSOded Gabbay int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size, 9886e81090bSOded Gabbay struct kfd_mem_obj **mem_obj) 9896e81090bSOded Gabbay { 9906e81090bSOded Gabbay unsigned int found, start_search, cur_size; 9916e81090bSOded Gabbay 9926e81090bSOded Gabbay if (size == 0) 9936e81090bSOded Gabbay return -EINVAL; 9946e81090bSOded Gabbay 9956e81090bSOded Gabbay if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size) 9966e81090bSOded Gabbay return -ENOMEM; 9976e81090bSOded Gabbay 9981cd106ecSFelix Kuehling *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL); 9991cd106ecSFelix Kuehling if (!(*mem_obj)) 10006e81090bSOded Gabbay return -ENOMEM; 10016e81090bSOded Gabbay 100279775b62SKent Russell pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size); 10036e81090bSOded Gabbay 10046e81090bSOded Gabbay start_search = 0; 10056e81090bSOded Gabbay 10066e81090bSOded Gabbay mutex_lock(&kfd->gtt_sa_lock); 10076e81090bSOded Gabbay 10086e81090bSOded Gabbay kfd_gtt_restart_search: 10096e81090bSOded Gabbay /* Find the first chunk that is free */ 10106e81090bSOded Gabbay found = find_next_zero_bit(kfd->gtt_sa_bitmap, 10116e81090bSOded Gabbay kfd->gtt_sa_num_of_chunks, 10126e81090bSOded Gabbay start_search); 10136e81090bSOded Gabbay 101479775b62SKent Russell pr_debug("Found = %d\n", found); 10156e81090bSOded Gabbay 10166e81090bSOded Gabbay /* If there wasn't any free chunk, bail out */ 10176e81090bSOded Gabbay if (found == kfd->gtt_sa_num_of_chunks) 10186e81090bSOded Gabbay goto kfd_gtt_no_free_chunk; 10196e81090bSOded Gabbay 10206e81090bSOded Gabbay /* Update fields of mem_obj */ 10216e81090bSOded Gabbay (*mem_obj)->range_start = found; 10226e81090bSOded Gabbay (*mem_obj)->range_end = found; 10236e81090bSOded Gabbay (*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr( 10246e81090bSOded Gabbay kfd->gtt_start_gpu_addr, 10256e81090bSOded Gabbay found, 10266e81090bSOded Gabbay kfd->gtt_sa_chunk_size); 10276e81090bSOded Gabbay (*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr( 10286e81090bSOded Gabbay kfd->gtt_start_cpu_ptr, 10296e81090bSOded Gabbay found, 10306e81090bSOded Gabbay kfd->gtt_sa_chunk_size); 10316e81090bSOded Gabbay 103279775b62SKent Russell pr_debug("gpu_addr = %p, cpu_addr = %p\n", 10336e81090bSOded Gabbay (uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr); 10346e81090bSOded Gabbay 10356e81090bSOded Gabbay /* If we need only one chunk, mark it as allocated and get out */ 10366e81090bSOded Gabbay if (size <= kfd->gtt_sa_chunk_size) { 103779775b62SKent Russell pr_debug("Single bit\n"); 1038b8b9ba58SChristophe JAILLET __set_bit(found, kfd->gtt_sa_bitmap); 10396e81090bSOded Gabbay goto kfd_gtt_out; 10406e81090bSOded Gabbay } 10416e81090bSOded Gabbay 10426e81090bSOded Gabbay /* Otherwise, try to see if we have enough contiguous chunks */ 10436e81090bSOded Gabbay cur_size = size - kfd->gtt_sa_chunk_size; 10446e81090bSOded Gabbay do { 10456e81090bSOded Gabbay (*mem_obj)->range_end = 10466e81090bSOded Gabbay find_next_zero_bit(kfd->gtt_sa_bitmap, 10476e81090bSOded Gabbay kfd->gtt_sa_num_of_chunks, ++found); 10486e81090bSOded Gabbay /* 10496e81090bSOded Gabbay * If next free chunk is not contiguous than we need to 10506e81090bSOded Gabbay * restart our search from the last free chunk we found (which 10516e81090bSOded Gabbay * wasn't contiguous to the previous ones 10526e81090bSOded Gabbay */ 10536e81090bSOded Gabbay if ((*mem_obj)->range_end != found) { 10546e81090bSOded Gabbay start_search = found; 10556e81090bSOded Gabbay goto kfd_gtt_restart_search; 10566e81090bSOded Gabbay } 10576e81090bSOded Gabbay 10586e81090bSOded Gabbay /* 10596e81090bSOded Gabbay * If we reached end of buffer, bail out with error 10606e81090bSOded Gabbay */ 10616e81090bSOded Gabbay if (found == kfd->gtt_sa_num_of_chunks) 10626e81090bSOded Gabbay goto kfd_gtt_no_free_chunk; 10636e81090bSOded Gabbay 10646e81090bSOded Gabbay /* Check if we don't need another chunk */ 10656e81090bSOded Gabbay if (cur_size <= kfd->gtt_sa_chunk_size) 10666e81090bSOded Gabbay cur_size = 0; 10676e81090bSOded Gabbay else 10686e81090bSOded Gabbay cur_size -= kfd->gtt_sa_chunk_size; 10696e81090bSOded Gabbay 10706e81090bSOded Gabbay } while (cur_size > 0); 10716e81090bSOded Gabbay 107279775b62SKent Russell pr_debug("range_start = %d, range_end = %d\n", 10736e81090bSOded Gabbay (*mem_obj)->range_start, (*mem_obj)->range_end); 10746e81090bSOded Gabbay 10756e81090bSOded Gabbay /* Mark the chunks as allocated */ 1076b8b9ba58SChristophe JAILLET bitmap_set(kfd->gtt_sa_bitmap, (*mem_obj)->range_start, 1077b8b9ba58SChristophe JAILLET (*mem_obj)->range_end - (*mem_obj)->range_start + 1); 10786e81090bSOded Gabbay 10796e81090bSOded Gabbay kfd_gtt_out: 10806e81090bSOded Gabbay mutex_unlock(&kfd->gtt_sa_lock); 10816e81090bSOded Gabbay return 0; 10826e81090bSOded Gabbay 10836e81090bSOded Gabbay kfd_gtt_no_free_chunk: 10843148a6a0SJack Zhang pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj); 10856e81090bSOded Gabbay mutex_unlock(&kfd->gtt_sa_lock); 10863148a6a0SJack Zhang kfree(*mem_obj); 10876e81090bSOded Gabbay return -ENOMEM; 10886e81090bSOded Gabbay } 10896e81090bSOded Gabbay 10906e81090bSOded Gabbay int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj) 10916e81090bSOded Gabbay { 10929216ed29SOded Gabbay /* Act like kfree when trying to free a NULL object */ 10939216ed29SOded Gabbay if (!mem_obj) 10949216ed29SOded Gabbay return 0; 10956e81090bSOded Gabbay 109679775b62SKent Russell pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n", 10976e81090bSOded Gabbay mem_obj, mem_obj->range_start, mem_obj->range_end); 10986e81090bSOded Gabbay 10996e81090bSOded Gabbay mutex_lock(&kfd->gtt_sa_lock); 11006e81090bSOded Gabbay 11016e81090bSOded Gabbay /* Mark the chunks as free */ 1102b8b9ba58SChristophe JAILLET bitmap_clear(kfd->gtt_sa_bitmap, mem_obj->range_start, 1103b8b9ba58SChristophe JAILLET mem_obj->range_end - mem_obj->range_start + 1); 11046e81090bSOded Gabbay 11056e81090bSOded Gabbay mutex_unlock(&kfd->gtt_sa_lock); 11066e81090bSOded Gabbay 11076e81090bSOded Gabbay kfree(mem_obj); 11086e81090bSOded Gabbay return 0; 11096e81090bSOded Gabbay } 1110a29ec470SShaoyun Liu 11119b54d201SEric Huang void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd) 11129b54d201SEric Huang { 11139b54d201SEric Huang if (kfd) 11149b54d201SEric Huang atomic_inc(&kfd->sram_ecc_flag); 11159b54d201SEric Huang } 11169b54d201SEric Huang 111743d8107fSHarish Kasiviswanathan void kfd_inc_compute_active(struct kfd_dev *kfd) 111843d8107fSHarish Kasiviswanathan { 111943d8107fSHarish Kasiviswanathan if (atomic_inc_return(&kfd->compute_profile) == 1) 11206bfc7c7eSGraham Sider amdgpu_amdkfd_set_compute_idle(kfd->adev, false); 112143d8107fSHarish Kasiviswanathan } 112243d8107fSHarish Kasiviswanathan 112343d8107fSHarish Kasiviswanathan void kfd_dec_compute_active(struct kfd_dev *kfd) 112443d8107fSHarish Kasiviswanathan { 112543d8107fSHarish Kasiviswanathan int count = atomic_dec_return(&kfd->compute_profile); 112643d8107fSHarish Kasiviswanathan 112743d8107fSHarish Kasiviswanathan if (count == 0) 11286bfc7c7eSGraham Sider amdgpu_amdkfd_set_compute_idle(kfd->adev, true); 112943d8107fSHarish Kasiviswanathan WARN_ONCE(count < 0, "Compute profile ref. count error"); 113043d8107fSHarish Kasiviswanathan } 113143d8107fSHarish Kasiviswanathan 1132410e302eSGraham Sider void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) 11332c2b0d88SMukul Joshi { 1134158fc08dSAmber Lin if (kfd && kfd->init_complete) 11352c2b0d88SMukul Joshi kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask); 11362c2b0d88SMukul Joshi } 11372c2b0d88SMukul Joshi 1138ee2f17f4SAmber Lin /* kfd_get_num_sdma_engines returns the number of PCIe optimized SDMA and 1139ee2f17f4SAmber Lin * kfd_get_num_xgmi_sdma_engines returns the number of XGMI SDMA. 1140ee2f17f4SAmber Lin * When the device has more than two engines, we reserve two for PCIe to enable 1141ee2f17f4SAmber Lin * full-duplex and the rest are used as XGMI. 1142ee2f17f4SAmber Lin */ 1143ee2f17f4SAmber Lin unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev) 1144ee2f17f4SAmber Lin { 1145ee2f17f4SAmber Lin /* If XGMI is not supported, all SDMA engines are PCIe */ 1146ee2f17f4SAmber Lin if (!kdev->adev->gmc.xgmi.supported) 1147ee2f17f4SAmber Lin return kdev->adev->sdma.num_instances; 1148ee2f17f4SAmber Lin 1149ee2f17f4SAmber Lin return min(kdev->adev->sdma.num_instances, 2); 1150ee2f17f4SAmber Lin } 1151ee2f17f4SAmber Lin 1152ee2f17f4SAmber Lin unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev) 1153ee2f17f4SAmber Lin { 1154ee2f17f4SAmber Lin /* After reserved for PCIe, the rest of engines are XGMI */ 1155ee2f17f4SAmber Lin return kdev->adev->sdma.num_instances - kfd_get_num_sdma_engines(kdev); 1156ee2f17f4SAmber Lin } 1157ee2f17f4SAmber Lin 1158a29ec470SShaoyun Liu #if defined(CONFIG_DEBUG_FS) 1159a29ec470SShaoyun Liu 1160a29ec470SShaoyun Liu /* This function will send a package to HIQ to hang the HWS 1161a29ec470SShaoyun Liu * which will trigger a GPU reset and bring the HWS back to normal state 1162a29ec470SShaoyun Liu */ 1163a29ec470SShaoyun Liu int kfd_debugfs_hang_hws(struct kfd_dev *dev) 1164a29ec470SShaoyun Liu { 1165a29ec470SShaoyun Liu if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) { 1166a29ec470SShaoyun Liu pr_err("HWS is not enabled"); 1167a29ec470SShaoyun Liu return -EINVAL; 1168a29ec470SShaoyun Liu } 1169a29ec470SShaoyun Liu 11704f942aaeSOak Zeng return dqm_debugfs_hang_hws(dev->dqm); 1171a29ec470SShaoyun Liu } 1172a29ec470SShaoyun Liu 1173a29ec470SShaoyun Liu #endif 1174