// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_headers_aldebaran.h"
#include "cwsr_trap_handler.h"
#include "kfd_iommu.h"
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
#include "kfd_migrate.h"
#include "amdgpu.h"

#define MQD_SIZE_ALIGNED 768

/*
 * kfd_locked is used to lock the kfd driver during suspend or reset
 * once locked, kfd driver will stop any further GPU execution.
 * create process (open) will return -EAGAIN.
 */
static atomic_t kfd_locked = ATOMIC_INIT(0);

#ifdef CONFIG_DRM_AMDGPU_CIK
extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
#endif
extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
extern const struct kfd2kgd_calls arcturus_kfd2kgd;
extern const struct kfd2kgd_calls aldebaran_kfd2kgd;
extern const struct kfd2kgd_calls gc_9_4_3_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v11_kfd2kgd;

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);

static int kfd_resume_iommu(struct kfd_dev *kfd);
static int kfd_resume(struct kfd_dev *kfd);

static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
{
	uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0];

	switch (sdma_version) {
	case IP_VERSION(4, 0, 0):/* VEGA10 */
	case IP_VERSION(4, 0, 1):/* VEGA12 */
	case IP_VERSION(4, 1, 0):/* RAVEN */
	case IP_VERSION(4, 1, 1):/* RAVEN */
	case IP_VERSION(4, 1, 2):/* RENOIR */
	case IP_VERSION(5, 2, 1):/* VANGOGH */
	case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
	case IP_VERSION(5, 2, 6):/* GC 10.3.6 */
	case IP_VERSION(5, 2, 7):/* GC 10.3.7 */
		kfd->device_info.num_sdma_queues_per_engine = 2;
		break;
	case IP_VERSION(4, 2, 0):/* VEGA20 */
	case IP_VERSION(4, 2, 2):/* ARCTURUS */
	case IP_VERSION(4, 4, 0):/* ALDEBARAN */
	case IP_VERSION(5, 0, 0):/* NAVI10 */
	case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
	case IP_VERSION(5, 0, 2):/* NAVI14 */
	case IP_VERSION(5, 0, 5):/* NAVI12 */
	case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */
	case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */
	case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */
	case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		kfd->device_info.num_sdma_queues_per_engine = 8;
		break;
	default:
		dev_warn(kfd_device,
			 "Default sdma queue per engine(8) is set due to mismatch of sdma ip block(SDMA_HWIP:0x%x).\n",
			 sdma_version);
		kfd->device_info.num_sdma_queues_per_engine = 8;
	}

	switch (sdma_version) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		/* Reserve 1 for paging and 1 for gfx */
		kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
		/* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
		kfd->device_info.reserved_sdma_queues_bitmap = 0xFULL;
		break;
	case IP_VERSION(6, 0, 1):
		/* Reserve 1 for paging and 1 for gfx */
		kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
		/* BIT(0)=engine-0 queue-0; BIT(1)=engine-0 queue-1; ... */
		kfd->device_info.reserved_sdma_queues_bitmap = 0x3ULL;
		break;
	default:
		break;
	}
}
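
/*
 * Worked example of the bitmap encoding above (assuming two SDMA engines, as
 * the interleaved BIT() comment implies): 0xF covers engine-0 queue-0,
 * engine-1 queue-0, engine-0 queue-1 and engine-1 queue-1, i.e. the two
 * reserved queues (paging + gfx) on each engine. For SDMA 6.0.1 the comment
 * enumerates only engine 0, so 0x3 reserves its queue-0 and queue-1.
 */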

static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
{
	uint32_t gc_version = KFD_GC_VERSION(kfd);

	switch (gc_version) {
	case IP_VERSION(9, 0, 1): /* VEGA10 */
	case IP_VERSION(9, 1, 0): /* RAVEN */
	case IP_VERSION(9, 2, 1): /* VEGA12 */
	case IP_VERSION(9, 2, 2): /* RAVEN */
	case IP_VERSION(9, 3, 0): /* RENOIR */
	case IP_VERSION(9, 4, 0): /* VEGA20 */
	case IP_VERSION(9, 4, 1): /* ARCTURUS */
	case IP_VERSION(9, 4, 2): /* ALDEBARAN */
	case IP_VERSION(10, 3, 1): /* VANGOGH */
	case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
	case IP_VERSION(10, 3, 6): /* GC 10.3.6 */
	case IP_VERSION(10, 3, 7): /* GC 10.3.7 */
	case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 1, 10): /* NAVI10 */
	case IP_VERSION(10, 1, 2): /* NAVI12 */
	case IP_VERSION(10, 1, 1): /* NAVI14 */
	case IP_VERSION(10, 3, 0): /* SIENNA_CICHLID */
	case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */
	case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */
	case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
		break;
	default:
		dev_warn(kfd_device, "v9 event interrupt handler is set due to "
			 "mismatch of gc ip block(GC_HWIP:0x%x).\n", gc_version);
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
	}
}

static void kfd_device_info_init(struct kfd_dev *kfd,
				 bool vf, uint32_t gfx_target_version)
{
	uint32_t gc_version = KFD_GC_VERSION(kfd);
	uint32_t asic_type = kfd->adev->asic_type;

	kfd->device_info.max_pasid_bits = 16;
	kfd->device_info.max_no_of_hqd = 24;
	kfd->device_info.num_of_watch_points = 4;
	kfd->device_info.mqd_size_aligned = MQD_SIZE_ALIGNED;
	kfd->device_info.gfx_target_version = gfx_target_version;

	if (KFD_IS_SOC15(kfd)) {
		kfd->device_info.doorbell_size = 8;
		kfd->device_info.ih_ring_entry_size = 8 * sizeof(uint32_t);
		kfd->device_info.supports_cwsr = true;

		kfd_device_info_set_sdma_info(kfd);

		kfd_device_info_set_event_interrupt_class(kfd);

		/* Raven */
		if (gc_version == IP_VERSION(9, 1, 0) ||
		    gc_version == IP_VERSION(9, 2, 2))
			kfd->device_info.needs_iommu_device = true;

		if (gc_version < IP_VERSION(11, 0, 0)) {
			/* Navi2x+, Navi1x+ */
			if (gc_version == IP_VERSION(10, 3, 6))
				kfd->device_info.no_atomic_fw_version = 14;
			else if (gc_version == IP_VERSION(10, 3, 7))
				kfd->device_info.no_atomic_fw_version = 3;
			else if (gc_version >= IP_VERSION(10, 3, 0))
				kfd->device_info.no_atomic_fw_version = 92;
			else if (gc_version >= IP_VERSION(10, 1, 1))
				kfd->device_info.no_atomic_fw_version = 145;

			/* Navi1x+ */
			if (gc_version >= IP_VERSION(10, 1, 1))
				kfd->device_info.needs_pci_atomics = true;
		} else if (gc_version < IP_VERSION(12, 0, 0)) {
			/*
			 * PCIe atomics support acknowledgment in GFX11 RS64 CPFW requires
			 * MEC version >= 509. Prior RS64 CPFW versions (and all F32) require
			 * PCIe atomics support.
			 */
			kfd->device_info.needs_pci_atomics = true;
			kfd->device_info.no_atomic_fw_version = kfd->adev->gfx.rs64_enable ? 509 : 0;
		}
	} else {
		kfd->device_info.doorbell_size = 4;
		kfd->device_info.ih_ring_entry_size = 4 * sizeof(uint32_t);
		kfd->device_info.event_interrupt_class = &event_interrupt_class_cik;
		kfd->device_info.num_sdma_queues_per_engine = 2;

		if (asic_type != CHIP_KAVERI &&
		    asic_type != CHIP_HAWAII &&
		    asic_type != CHIP_TONGA)
			kfd->device_info.supports_cwsr = true;

		if (asic_type == CHIP_KAVERI ||
		    asic_type == CHIP_CARRIZO)
			kfd->device_info.needs_iommu_device = true;

		if (asic_type != CHIP_HAWAII && !vf)
			kfd->device_info.needs_pci_atomics = true;
	}
}
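
/*
 * no_atomic_fw_version set above is the first MEC firmware version that can
 * operate without PCIe atomic support; kgd2kfd_device_init() compares it
 * against the detected mec_fw_version before accepting a device whose bus
 * rejects atomics. gfx_target_version appears to encode the target ISA as
 * major * 10000 + minor * 100 + stepping (e.g. 100306 for GC 10.3.6) and is
 * exposed to user mode through the KFD topology.
 */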

struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
{
	struct kfd_dev *kfd = NULL;
	const struct kfd2kgd_calls *f2g = NULL;
	uint32_t gfx_target_version = 0;

	switch (adev->asic_type) {
#ifdef KFD_SUPPORT_IOMMU_V2
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
		gfx_target_version = 70000;
		if (!vf)
			f2g = &gfx_v7_kfd2kgd;
		break;
#endif
	case CHIP_CARRIZO:
		gfx_target_version = 80001;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_HAWAII:
		gfx_target_version = 70001;
		if (!amdgpu_exp_hw_support)
			pr_info(
	"KFD support on Hawaii is experimental. See modparam exp_hw_support\n"
				);
		else if (!vf)
			f2g = &gfx_v7_kfd2kgd;
		break;
#endif
	case CHIP_TONGA:
		gfx_target_version = 80002;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_FIJI:
	case CHIP_POLARIS10:
		gfx_target_version = 80003;
		f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		gfx_target_version = 80003;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
	default:
		switch (adev->ip_versions[GC_HWIP][0]) {
		/* Vega 10 */
		case IP_VERSION(9, 0, 1):
			gfx_target_version = 90000;
			f2g = &gfx_v9_kfd2kgd;
			break;
#ifdef KFD_SUPPORT_IOMMU_V2
		/* Raven */
		case IP_VERSION(9, 1, 0):
		case IP_VERSION(9, 2, 2):
			gfx_target_version = 90002;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
#endif
		/* Vega12 */
		case IP_VERSION(9, 2, 1):
			gfx_target_version = 90004;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
		/* Renoir */
		case IP_VERSION(9, 3, 0):
			gfx_target_version = 90012;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
		/* Vega20 */
		case IP_VERSION(9, 4, 0):
			gfx_target_version = 90006;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
		/* Arcturus */
		case IP_VERSION(9, 4, 1):
			gfx_target_version = 90008;
			f2g = &arcturus_kfd2kgd;
			break;
		/* Aldebaran */
		case IP_VERSION(9, 4, 2):
			gfx_target_version = 90010;
			f2g = &aldebaran_kfd2kgd;
			break;
		case IP_VERSION(9, 4, 3):
			gfx_target_version = 90400;
			f2g = &gc_9_4_3_kfd2kgd;
			break;
		/* Navi10 */
		case IP_VERSION(10, 1, 10):
			gfx_target_version = 100100;
			if (!vf)
				f2g = &gfx_v10_kfd2kgd;
			break;
		/* Navi12 */
		case IP_VERSION(10, 1, 2):
			gfx_target_version = 100101;
			f2g = &gfx_v10_kfd2kgd;
			break;
		/* Navi14 */
		case IP_VERSION(10, 1, 1):
			gfx_target_version = 100102;
			if (!vf)
				f2g = &gfx_v10_kfd2kgd;
			break;
		/* Cyan Skillfish */
		case IP_VERSION(10, 1, 3):
		case IP_VERSION(10, 1, 4):
			gfx_target_version = 100103;
			if (!vf)
				f2g = &gfx_v10_kfd2kgd;
			break;
		/* Sienna Cichlid */
		case IP_VERSION(10, 3, 0):
			gfx_target_version = 100300;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Navy Flounder */
		case IP_VERSION(10, 3, 2):
			gfx_target_version = 100301;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Van Gogh */
		case IP_VERSION(10, 3, 1):
			gfx_target_version = 100303;
			if (!vf)
				f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Dimgrey Cavefish */
		case IP_VERSION(10, 3, 4):
			gfx_target_version = 100302;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Beige Goby */
		case IP_VERSION(10, 3, 5):
			gfx_target_version = 100304;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Yellow Carp */
		case IP_VERSION(10, 3, 3):
			gfx_target_version = 100305;
			if (!vf)
				f2g = &gfx_v10_3_kfd2kgd;
			break;
		case IP_VERSION(10, 3, 6):
		case IP_VERSION(10, 3, 7):
			gfx_target_version = 100306;
			if (!vf)
				f2g = &gfx_v10_3_kfd2kgd;
			break;
		case IP_VERSION(11, 0, 0):
			gfx_target_version = 110000;
			f2g = &gfx_v11_kfd2kgd;
			break;
		case IP_VERSION(11, 0, 1):
		case IP_VERSION(11, 0, 4):
			gfx_target_version = 110003;
			f2g = &gfx_v11_kfd2kgd;
			break;
		case IP_VERSION(11, 0, 2):
			gfx_target_version = 110002;
			f2g = &gfx_v11_kfd2kgd;
			break;
		case IP_VERSION(11, 0, 3):
			/* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
			gfx_target_version = 110001;
			f2g = &gfx_v11_kfd2kgd;
			break;
		default:
			break;
		}
		break;
	}

	if (!f2g) {
		if (adev->ip_versions[GC_HWIP][0])
			dev_err(kfd_device, "GC IP %06x %s not supported in kfd\n",
				adev->ip_versions[GC_HWIP][0], vf ? "VF" : "");
		else
			dev_err(kfd_device, "%s %s not supported in kfd\n",
				amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
		return NULL;
	}

	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
	if (!kfd)
		return NULL;

	kfd->adev = adev;
	kfd_device_info_init(kfd, vf, gfx_target_version);
	kfd->init_complete = false;
	kfd->kfd2kgd = f2g;
	atomic_set(&kfd->compute_profile, 0);

	mutex_init(&kfd->doorbell_mutex);
	memset(&kfd->doorbell_available_index, 0,
		sizeof(kfd->doorbell_available_index));

	atomic_set(&kfd->sram_ecc_flag, 0);

	ida_init(&kfd->doorbell_ida);

	return kfd;
}

static void kfd_cwsr_init(struct kfd_dev *kfd)
{
	if (cwsr_enable && kfd->device_info.supports_cwsr) {
		if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx8_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_arcturus_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_aldebaran_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex);
		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_4_3_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx9_4_3_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_4_3_hex);
		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx9_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_nv1x_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx10_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex);
		} else {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx11_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx11_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex);
		}

		kfd->cwsr_enabled = true;
	}
}
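
/*
 * Each CWSR trap handler image selected above must fit in one page; the
 * BUILD_BUG_ON checks in kfd_cwsr_init() enforce that at build time,
 * presumably because the handler is copied into a single reserved trap base
 * area page in every KFD process address space.
 */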

static int kfd_gws_init(struct kfd_dev *kfd)
{
	int ret = 0;

	if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
		return 0;

	if (hws_gws_support || (KFD_IS_SOC15(kfd) &&
		((KFD_GC_VERSION(kfd) == IP_VERSION(9, 0, 1)
			&& kfd->mec2_fw_version >= 0x81b3) ||
		(KFD_GC_VERSION(kfd) <= IP_VERSION(9, 4, 0)
			&& kfd->mec2_fw_version >= 0x1b3) ||
		(KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)
			&& kfd->mec2_fw_version >= 0x30) ||
		(KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)
			&& kfd->mec2_fw_version >= 0x28) ||
		(KFD_GC_VERSION(kfd) >= IP_VERSION(10, 3, 0)
			&& KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)
			&& kfd->mec2_fw_version >= 0x6b))))
		ret = amdgpu_amdkfd_alloc_gws(kfd->adev,
				kfd->adev->gds.gws_size, &kfd->gws);

	return ret;
}

static void kfd_smi_init(struct kfd_dev *dev)
{
	INIT_LIST_HEAD(&dev->smi_clients);
	spin_lock_init(&dev->smi_lock);
}

bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	unsigned int size, map_process_packet_size;

	kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_MEC1);
	kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_MEC2);
	kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_SDMA1);
	kfd->shared_resources = *gpu_resources;

	kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
			- kfd->vm_info.first_vmid_kfd + 1;

	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
	 * 32 and 64-bit requests are possible and must be
	 * supported.
	 */
	kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev);
	if (!kfd->pci_atomic_requested &&
	    kfd->device_info.needs_pci_atomics &&
	    (!kfd->device_info.no_atomic_fw_version ||
	     kfd->mec_fw_version < kfd->device_info.no_atomic_fw_version)) {
		dev_info(kfd_device,
			 "skipped device %x:%x, PCI rejects atomics %d<%d\n",
			 kfd->adev->pdev->vendor, kfd->adev->pdev->device,
			 kfd->mec_fw_version,
			 kfd->device_info.no_atomic_fw_version);
		return false;
	}

	/* Verify module parameters regarding mapped process number */
	if (hws_max_conc_proc >= 0)
		kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd);
	else
		kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;

	/* calculate max size of mqds needed for queues */
	size = max_num_of_queues_per_device *
			kfd->device_info.mqd_size_aligned;

	/*
	 * calculate max size of runlist packet.
	 * There can be only 2 packets at once
	 */
	map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ?
				sizeof(struct pm4_mes_map_process_aldebaran) :
				sizeof(struct pm4_mes_map_process);
	size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size +
		max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
		+ sizeof(struct pm4_mes_runlist)) * 2;

	/* Add size of HIQ & DIQ */
	size += KFD_KERNEL_QUEUE_SIZE * 2;

	/* add another 512KB for all other allocations on gart (HPD, fences) */
	size += 512 * 1024;
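
	/*
	 * Rough illustration of the budget above, assuming the default
	 * max_num_of_queues_per_device of 4096: the MQD area alone is
	 * 4096 * MQD_SIZE_ALIGNED(768) = 3 MiB, on top of which come two
	 * runlists' worth of map_process/map_queues packets, the HIQ/DIQ
	 * kernel queues and the extra 512 KiB. The whole buffer is then
	 * carved up by the 512-byte-chunk sub-allocator initialized below.
	 */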

	if (amdgpu_amdkfd_alloc_gtt_mem(
			kfd->adev, size, &kfd->gtt_mem,
			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
			false)) {
		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
		goto alloc_gtt_mem_failure;
	}

	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);

	/* Initialize GTT sa with 512 byte chunk size */
	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
		goto kfd_gtt_sa_init_error;
	}

	if (kfd_doorbell_init(kfd)) {
		dev_err(kfd_device,
			"Error initializing doorbell aperture\n");
		goto kfd_doorbell_error;
	}

	if (amdgpu_use_xgmi_p2p)
		kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;

	kfd->noretry = kfd->adev->gmc.noretry;

	if (kfd_interrupt_init(kfd)) {
		dev_err(kfd_device, "Error initializing interrupts\n");
		goto kfd_interrupt_error;
	}

	kfd->dqm = device_queue_manager_init(kfd);
	if (!kfd->dqm) {
		dev_err(kfd_device, "Error initializing queue manager\n");
		goto device_queue_manager_error;
	}

	/* If supported on this device, allocate global GWS that is shared
	 * by all KFD processes
	 */
	if (kfd_gws_init(kfd)) {
		dev_err(kfd_device, "Could not allocate %d gws\n",
			kfd->adev->gds.gws_size);
		goto gws_error;
	}

	/* If CRAT is broken, won't set iommu enabled */
	kfd_double_confirm_iommu_support(kfd);

	if (kfd_iommu_device_init(kfd)) {
		kfd->use_iommu_v2 = false;
		dev_err(kfd_device, "Error initializing iommuv2\n");
		goto device_iommu_error;
	}

	kfd_cwsr_init(kfd);

	svm_migrate_init(kfd->adev);

	if (kfd_resume_iommu(kfd))
		goto device_iommu_error;

	if (kfd_resume(kfd))
		goto kfd_resume_error;

	amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info);

	if (kfd_topology_add_device(kfd)) {
		dev_err(kfd_device, "Error adding device to topology\n");
		goto kfd_topology_add_device_error;
	}

	kfd_smi_init(kfd);

	kfd->init_complete = true;
	dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor,
		 kfd->adev->pdev->device);

	pr_debug("Starting kfd with the following scheduling policy %d\n",
		kfd->dqm->sched_policy);

	goto out;

kfd_topology_add_device_error:
kfd_resume_error:
device_iommu_error:
gws_error:
	device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
	kfd_interrupt_exit(kfd);
kfd_interrupt_error:
	kfd_doorbell_fini(kfd);
kfd_doorbell_error:
	kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
	amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
alloc_gtt_mem_failure:
	if (kfd->gws)
		amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
	dev_err(kfd_device,
		"device %x:%x NOT added due to errors\n",
		kfd->adev->pdev->vendor, kfd->adev->pdev->device);
out:
	return kfd->init_complete;
}

void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
	if (kfd->init_complete) {
		device_queue_manager_uninit(kfd->dqm);
		kfd_interrupt_exit(kfd);
		kfd_topology_remove_device(kfd);
		kfd_doorbell_fini(kfd);
		ida_destroy(&kfd->doorbell_ida);
		kfd_gtt_sa_fini(kfd);
		amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
		if (kfd->gws)
			amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
	}

	kfree(kfd);
}

int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return 0;

	kfd_smi_event_update_gpu_reset(kfd, false);

	kfd->dqm->ops.pre_reset(kfd->dqm);

	kgd2kfd_suspend(kfd, false);

	kfd_signal_reset_event(kfd);
	return 0;
}

/*
 * FIXME: KFD cannot resume existing processes for now. All existing
 * processes are kept in an evicted state until they terminate.
 */

int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	int ret;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;
	atomic_dec(&kfd_locked);

	atomic_set(&kfd->sram_ecc_flag, 0);

	kfd_smi_event_update_gpu_reset(kfd, true);

	return 0;
}

bool kfd_is_locked(void)
{
	return  (atomic_read(&kfd_locked) > 0);
}
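
/*
 * kfd_locked bookkeeping: each device suspend outside runtime PM (including
 * the one done by kgd2kfd_pre_reset()) increments the counter and the first
 * one evicts all processes; each resume (and kgd2kfd_post_reset()) decrements
 * it and the last one restores them. While the count is non-zero, creating a
 * process via open() fails with -EAGAIN, as noted at the top of this file.
 */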

void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
	if (!kfd->init_complete)
		return;

	/* for runtime suspend, skip locking kfd */
	if (!run_pm) {
		/* For first KFD device suspend all the KFD processes */
		if (atomic_inc_return(&kfd_locked) == 1)
			kfd_suspend_all_processes();
	}

	kfd->dqm->ops.stop(kfd->dqm);
	kfd_iommu_suspend(kfd);
}

int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	int ret, count;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;

	/* for runtime resume, skip unlocking kfd */
	if (!run_pm) {
		count = atomic_dec_return(&kfd_locked);
		WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
		if (count == 0)
			ret = kfd_resume_all_processes();
	}

	return ret;
}

int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return 0;

	return kfd_resume_iommu(kfd);
}

static int kfd_resume_iommu(struct kfd_dev *kfd)
{
	int err = 0;

	err = kfd_iommu_resume(kfd);
	if (err)
		dev_err(kfd_device,
			"Failed to resume IOMMU for device %x:%x\n",
			kfd->adev->pdev->vendor, kfd->adev->pdev->device);
	return err;
}

static int kfd_resume(struct kfd_dev *kfd)
{
	int err = 0;

	err = kfd->dqm->ops.start(kfd->dqm);
	if (err)
		dev_err(kfd_device,
			"Error starting queue manager for device %x:%x\n",
			kfd->adev->pdev->vendor, kfd->adev->pdev->device);

	return err;
}
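
/*
 * Pick a CPU on the current NUMA node to run the interrupt worker so that
 * IH ring processing stays cache- and memory-local; if no other online CPU
 * on this node is found after wrapping around the online mask, the last
 * candidate is used.
 */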

static inline void kfd_queue_work(struct workqueue_struct *wq,
				  struct work_struct *work)
{
	int cpu, new_cpu;

	cpu = new_cpu = smp_processor_id();
	do {
		new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
		if (cpu_to_node(new_cpu) == numa_node_id())
			break;
	} while (cpu != new_cpu);

	queue_work_on(new_cpu, wq, work);
}

/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
	bool is_patched = false;
	unsigned long flags;

	if (!kfd->init_complete)
		return;

	if (kfd->device_info.ih_ring_entry_size > sizeof(patched_ihre)) {
		dev_err_once(kfd_device, "Ring entry too small\n");
		return;
	}

	spin_lock_irqsave(&kfd->interrupt_lock, flags);

	if (kfd->interrupts_active
	    && interrupt_is_wanted(kfd, ih_ring_entry,
				   patched_ihre, &is_patched)
	    && enqueue_ih_ring_entry(kfd,
				     is_patched ? patched_ihre : ih_ring_entry))
		kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work);

	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
}

int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
	r = kfd_process_evict_queues(p, trigger);

	kfd_unref_process(p);
	return r;
}

int kgd2kfd_resume_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_restore_queues(p);

	kfd_unref_process(p);
	return r;
}

/** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
 *   prepare for safe eviction of KFD BOs that belong to the specified
 *   process.
 *
 * @mm: mm_struct that identifies the specified KFD process
 * @fence: eviction fence attached to KFD process BOs
 *
 */
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					       struct dma_fence *fence)
{
	struct kfd_process *p;
	unsigned long active_time;
	unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);

	if (!fence)
		return -EINVAL;

	if (dma_fence_is_signaled(fence))
		return 0;

	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ENODEV;

	if (fence->seqno == p->last_eviction_seqno)
		goto out;

	p->last_eviction_seqno = fence->seqno;

	/* Avoid KFD process starvation. Wait for at least
	 * PROCESS_ACTIVE_TIME_MS before evicting the process again
	 */
	active_time = get_jiffies_64() - p->last_restore_timestamp;
	if (delay_jiffies > active_time)
		delay_jiffies -= active_time;
	else
		delay_jiffies = 0;

	/* During process initialization eviction_work.dwork is initialized
	 * to kfd_evict_bo_worker
	 */
	WARN(debug_evictions, "Scheduling eviction of pid %d in %ld jiffies",
	     p->lead_thread->pid, delay_jiffies);
	schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
	kfd_unref_process(p);
	return 0;
}

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size)
{
	if (WARN_ON(buf_size < chunk_size))
		return -EINVAL;
	if (WARN_ON(buf_size == 0))
		return -EINVAL;
	if (WARN_ON(chunk_size == 0))
		return -EINVAL;

	kfd->gtt_sa_chunk_size = chunk_size;
	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

	kfd->gtt_sa_bitmap = bitmap_zalloc(kfd->gtt_sa_num_of_chunks,
					   GFP_KERNEL);
	if (!kfd->gtt_sa_bitmap)
		return -ENOMEM;

	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

	mutex_init(&kfd->gtt_sa_lock);

	return 0;
}
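
/*
 * The GTT sub-allocator hands out fixed-size chunks (512 bytes, as chosen in
 * kgd2kfd_device_init()) from the GTT buffer allocated there, using a
 * first-fit search over gtt_sa_bitmap under gtt_sa_lock. Address math
 * example: chunk 3 with a 512-byte chunk size lives at
 * gtt_start_gpu_addr + 3 * 512 = gtt_start_gpu_addr + 0x600, and the matching
 * CPU pointer is offset identically from gtt_start_cpu_ptr.
 */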

static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
	mutex_destroy(&kfd->gtt_sa_lock);
	bitmap_free(kfd->gtt_sa_bitmap);
}

static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}

int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj)
{
	unsigned int found, start_search, cur_size;

	if (size == 0)
		return -EINVAL;

	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
		return -ENOMEM;

	*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!(*mem_obj))
		return -ENOMEM;

	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);

	start_search = 0;

	mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
	/* Find the first chunk that is free */
	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks,
					start_search);

	pr_debug("Found = %d\n", found);

	/* If there wasn't any free chunk, bail out */
	if (found == kfd->gtt_sa_num_of_chunks)
		goto kfd_gtt_no_free_chunk;

	/* Update fields of mem_obj */
	(*mem_obj)->range_start = found;
	(*mem_obj)->range_end = found;
	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
					kfd->gtt_start_gpu_addr,
					found,
					kfd->gtt_sa_chunk_size);
	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
					kfd->gtt_start_cpu_ptr,
					found,
					kfd->gtt_sa_chunk_size);

	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

	/* If we need only one chunk, mark it as allocated and get out */
	if (size <= kfd->gtt_sa_chunk_size) {
		pr_debug("Single bit\n");
		__set_bit(found, kfd->gtt_sa_bitmap);
		goto kfd_gtt_out;
	}

	/* Otherwise, try to see if we have enough contiguous chunks */
	cur_size = size - kfd->gtt_sa_chunk_size;
	do {
		(*mem_obj)->range_end =
			find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks, ++found);
		/*
		 * If the next free chunk is not contiguous then we need to
		 * restart our search from the last free chunk we found (which
		 * wasn't contiguous to the previous ones).
		 */
		if ((*mem_obj)->range_end != found) {
			start_search = found;
			goto kfd_gtt_restart_search;
		}

		/*
		 * If we reached end of buffer, bail out with error
		 */
		if (found == kfd->gtt_sa_num_of_chunks)
			goto kfd_gtt_no_free_chunk;

		/* Check if we don't need another chunk */
		if (cur_size <= kfd->gtt_sa_chunk_size)
			cur_size = 0;
		else
			cur_size -= kfd->gtt_sa_chunk_size;

	} while (cur_size > 0);

	pr_debug("range_start = %d, range_end = %d\n",
		(*mem_obj)->range_start, (*mem_obj)->range_end);

	/* Mark the chunks as allocated */
	bitmap_set(kfd->gtt_sa_bitmap, (*mem_obj)->range_start,
		   (*mem_obj)->range_end - (*mem_obj)->range_start + 1);

kfd_gtt_out:
	mutex_unlock(&kfd->gtt_sa_lock);
	return 0;

kfd_gtt_no_free_chunk:
	pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
	mutex_unlock(&kfd->gtt_sa_lock);
	kfree(*mem_obj);
	return -ENOMEM;
}

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
	/* Act like kfree when trying to free a NULL object */
	if (!mem_obj)
		return 0;

	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
			mem_obj, mem_obj->range_start, mem_obj->range_end);

	mutex_lock(&kfd->gtt_sa_lock);

	/* Mark the chunks as free */
	bitmap_clear(kfd->gtt_sa_bitmap, mem_obj->range_start,
		     mem_obj->range_end - mem_obj->range_start + 1);

	mutex_unlock(&kfd->gtt_sa_lock);

	kfree(mem_obj);
	return 0;
}

void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
	if (kfd)
		atomic_inc(&kfd->sram_ecc_flag);
}

void kfd_inc_compute_active(struct kfd_dev *kfd)
{
	if (atomic_inc_return(&kfd->compute_profile) == 1)
		amdgpu_amdkfd_set_compute_idle(kfd->adev, false);
}

void kfd_dec_compute_active(struct kfd_dev *kfd)
{
	int count = atomic_dec_return(&kfd->compute_profile);

	if (count == 0)
		amdgpu_amdkfd_set_compute_idle(kfd->adev, true);
	WARN_ONCE(count < 0, "Compute profile ref. count error");
}

void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
	if (kfd && kfd->init_complete)
		kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask);
}

/* kfd_get_num_sdma_engines returns the number of PCIe optimized SDMA and
 * kfd_get_num_xgmi_sdma_engines returns the number of XGMI SDMA.
 * When the device has more than two engines, we reserve two for PCIe to enable
 * full-duplex and the rest are used as XGMI.
 */
unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev)
{
	/* If XGMI is not supported, all SDMA engines are PCIe */
	if (!kdev->adev->gmc.xgmi.supported)
		return kdev->adev->sdma.num_instances;

	return min(kdev->adev->sdma.num_instances, 2);
}

unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev)
{
	/* Engines left over after the PCIe reservation are used as XGMI */
	return kdev->adev->sdma.num_instances - kfd_get_num_sdma_engines(kdev);
}
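
/*
 * Example of the split above: a device with 8 SDMA instances and XGMI support
 * exposes 2 PCIe-optimized engines and 6 XGMI engines; without XGMI support
 * all 8 instances are treated as PCIe engines.
 */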

#if defined(CONFIG_DEBUG_FS)

/* This function sends a packet to the HIQ to hang the HWS, which will
 * trigger a GPU reset and bring the HWS back to a normal state
 */
int kfd_debugfs_hang_hws(struct kfd_dev *dev)
{
	if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
		pr_err("HWS is not enabled");
		return -EINVAL;
	}

	return dqm_debugfs_hang_hws(dev->dqm);
}

#endif
1157ee2f17f4SAmber Lin */ 1158ee2f17f4SAmber Lin unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev) 1159ee2f17f4SAmber Lin { 1160ee2f17f4SAmber Lin /* If XGMI is not supported, all SDMA engines are PCIe */ 1161ee2f17f4SAmber Lin if (!kdev->adev->gmc.xgmi.supported) 1162ee2f17f4SAmber Lin return kdev->adev->sdma.num_instances; 1163ee2f17f4SAmber Lin 1164ee2f17f4SAmber Lin return min(kdev->adev->sdma.num_instances, 2); 1165ee2f17f4SAmber Lin } 1166ee2f17f4SAmber Lin 1167ee2f17f4SAmber Lin unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev) 1168ee2f17f4SAmber Lin { 1169ee2f17f4SAmber Lin /* After reserved for PCIe, the rest of engines are XGMI */ 1170ee2f17f4SAmber Lin return kdev->adev->sdma.num_instances - kfd_get_num_sdma_engines(kdev); 1171ee2f17f4SAmber Lin } 1172ee2f17f4SAmber Lin 1173a29ec470SShaoyun Liu #if defined(CONFIG_DEBUG_FS) 1174a29ec470SShaoyun Liu 1175a29ec470SShaoyun Liu /* This function will send a package to HIQ to hang the HWS 1176a29ec470SShaoyun Liu * which will trigger a GPU reset and bring the HWS back to normal state 1177a29ec470SShaoyun Liu */ 1178a29ec470SShaoyun Liu int kfd_debugfs_hang_hws(struct kfd_dev *dev) 1179a29ec470SShaoyun Liu { 1180a29ec470SShaoyun Liu if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) { 1181a29ec470SShaoyun Liu pr_err("HWS is not enabled"); 1182a29ec470SShaoyun Liu return -EINVAL; 1183a29ec470SShaoyun Liu } 1184a29ec470SShaoyun Liu 11854f942aaeSOak Zeng return dqm_debugfs_hang_hws(dev->dqm); 1186a29ec470SShaoyun Liu } 1187a29ec470SShaoyun Liu 1188a29ec470SShaoyun Liu #endif 1189