1d87f36a0SRajneesh Bhardwaj // SPDX-License-Identifier: GPL-2.0 OR MIT
24a488a7aSOded Gabbay /*
3d87f36a0SRajneesh Bhardwaj  * Copyright 2014-2022 Advanced Micro Devices, Inc.
44a488a7aSOded Gabbay  *
54a488a7aSOded Gabbay  * Permission is hereby granted, free of charge, to any person obtaining a
64a488a7aSOded Gabbay  * copy of this software and associated documentation files (the "Software"),
74a488a7aSOded Gabbay  * to deal in the Software without restriction, including without limitation
84a488a7aSOded Gabbay  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
94a488a7aSOded Gabbay  * and/or sell copies of the Software, and to permit persons to whom the
104a488a7aSOded Gabbay  * Software is furnished to do so, subject to the following conditions:
114a488a7aSOded Gabbay  *
124a488a7aSOded Gabbay  * The above copyright notice and this permission notice shall be included in
134a488a7aSOded Gabbay  * all copies or substantial portions of the Software.
144a488a7aSOded Gabbay  *
154a488a7aSOded Gabbay  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
164a488a7aSOded Gabbay  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
174a488a7aSOded Gabbay  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
184a488a7aSOded Gabbay  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
194a488a7aSOded Gabbay  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
204a488a7aSOded Gabbay  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
214a488a7aSOded Gabbay  * OTHER DEALINGS IN THE SOFTWARE.
224a488a7aSOded Gabbay  */
234a488a7aSOded Gabbay 
244a488a7aSOded Gabbay #include <linux/bsearch.h>
254a488a7aSOded Gabbay #include <linux/pci.h>
264a488a7aSOded Gabbay #include <linux/slab.h>
274a488a7aSOded Gabbay #include "kfd_priv.h"
2864c7f8cfSBen Goz #include "kfd_device_queue_manager.h"
29507968ddSFelix Kuehling #include "kfd_pm4_headers_vi.h"
30fd6a440eSJonathan Kim #include "kfd_pm4_headers_aldebaran.h"
310db54b24SYong Zhao #include "cwsr_trap_handler.h"
3264d1c3a4SFelix Kuehling #include "kfd_iommu.h"
335b87245fSAmber Lin #include "amdgpu_amdkfd.h"
342c2b0d88SMukul Joshi #include "kfd_smi_events.h"
35814ab993SPhilip Yang #include "kfd_migrate.h"
365b983db8SAlex Deucher #include "amdgpu.h"
374a488a7aSOded Gabbay 
3819f6d2a6SOded Gabbay #define MQD_SIZE_ALIGNED 768
39e42051d2SShaoyun Liu 
40e42051d2SShaoyun Liu /*
41e42051d2SShaoyun Liu  * kfd_locked is used to lock the kfd driver during suspend or reset.
42e42051d2SShaoyun Liu  * Once locked, the kfd driver will stop any further GPU execution.
43e42051d2SShaoyun Liu  * Process creation (open) will then return -EAGAIN.
44e42051d2SShaoyun Liu  */
45e42051d2SShaoyun Liu static atomic_t kfd_locked = ATOMIC_INIT(0);
4619f6d2a6SOded Gabbay 
47a3e520a2SAlex Deucher #ifdef CONFIG_DRM_AMDGPU_CIK
48e392c887SYong Zhao extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
49a3e520a2SAlex Deucher #endif
50e392c887SYong Zhao extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
51e392c887SYong Zhao extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
52e392c887SYong Zhao extern const struct kfd2kgd_calls arcturus_kfd2kgd;
535073506cSJonathan Kim extern const struct kfd2kgd_calls aldebaran_kfd2kgd;
54f544afacSAmber Lin extern const struct kfd2kgd_calls gc_9_4_3_kfd2kgd;
55e392c887SYong Zhao extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
563a2f0c81SYong Zhao extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd;
57cc009e61SMukul Joshi extern const struct kfd2kgd_calls gfx_v11_kfd2kgd;
58e392c887SYong Zhao 
596e81090bSOded Gabbay static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
606e81090bSOded Gabbay 				unsigned int chunk_size);
616e81090bSOded Gabbay static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
626e81090bSOded Gabbay 
637ee938acSFelix Kuehling static int kfd_resume_iommu(struct kfd_dev *kfd);
64*8dc1db31SMukul Joshi static int kfd_resume(struct kfd_node *kfd);
65b8935a7cSYong Zhao 
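/*
 * Derive SDMA queue configuration from the SDMA IP version: how many SDMA
 * queues each engine exposes and, for SDMA 6.x, which queues are reserved
 * for kernel paging/gfx use (tracked in reserved_sdma_queues_bitmap).
 */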
66cc009e61SMukul Joshi static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
67f89c6bf7SGuchun Chen {
68f89c6bf7SGuchun Chen 	uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0];
69f89c6bf7SGuchun Chen 
70f89c6bf7SGuchun Chen 	switch (sdma_version) {
71f89c6bf7SGuchun Chen 	case IP_VERSION(4, 0, 0):/* VEGA10 */
72f89c6bf7SGuchun Chen 	case IP_VERSION(4, 0, 1):/* VEGA12 */
73f89c6bf7SGuchun Chen 	case IP_VERSION(4, 1, 0):/* RAVEN */
74f89c6bf7SGuchun Chen 	case IP_VERSION(4, 1, 1):/* RAVEN */
755eb877b2SKent Russell 	case IP_VERSION(4, 1, 2):/* RENOIR */
76f89c6bf7SGuchun Chen 	case IP_VERSION(5, 2, 1):/* VANGOGH */
77f89c6bf7SGuchun Chen 	case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
787c4f4f19SMario Limonciello 	case IP_VERSION(5, 2, 6):/* GC 10.3.6 */
797c4f4f19SMario Limonciello 	case IP_VERSION(5, 2, 7):/* GC 10.3.7 */
80f89c6bf7SGuchun Chen 		kfd->device_info.num_sdma_queues_per_engine = 2;
81f89c6bf7SGuchun Chen 		break;
82f89c6bf7SGuchun Chen 	case IP_VERSION(4, 2, 0):/* VEGA20 */
835eb877b2SKent Russell 	case IP_VERSION(4, 2, 2):/* ARCTURUS */
84f89c6bf7SGuchun Chen 	case IP_VERSION(4, 4, 0):/* ALDEBARAN */
85f89c6bf7SGuchun Chen 	case IP_VERSION(5, 0, 0):/* NAVI10 */
86f89c6bf7SGuchun Chen 	case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
87f89c6bf7SGuchun Chen 	case IP_VERSION(5, 0, 2):/* NAVI14 */
88f89c6bf7SGuchun Chen 	case IP_VERSION(5, 0, 5):/* NAVI12 */
89f89c6bf7SGuchun Chen 	case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */
905eb877b2SKent Russell 	case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */
91f89c6bf7SGuchun Chen 	case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */
92f89c6bf7SGuchun Chen 	case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */
93cc009e61SMukul Joshi 	case IP_VERSION(6, 0, 0):
94efb4fd10SYifan Zhang 	case IP_VERSION(6, 0, 1):
9522dd871eSEric Huang 	case IP_VERSION(6, 0, 2):
965ddb5fe9SDavid Belanger 	case IP_VERSION(6, 0, 3):
97f89c6bf7SGuchun Chen 		kfd->device_info.num_sdma_queues_per_engine = 8;
98f89c6bf7SGuchun Chen 		break;
99f89c6bf7SGuchun Chen 	default:
100f89c6bf7SGuchun Chen 		dev_warn(kfd_device,
10120c5e425SGraham Sider 			"Default number of SDMA queues per engine (8) is set due to unsupported SDMA IP version (SDMA_HWIP:0x%x).\n",
102f89c6bf7SGuchun Chen 			sdma_version);
103f89c6bf7SGuchun Chen 		kfd->device_info.num_sdma_queues_per_engine = 8;
104f89c6bf7SGuchun Chen 	}
105cc009e61SMukul Joshi 
106cc009e61SMukul Joshi 	switch (sdma_version) {
107cc009e61SMukul Joshi 	case IP_VERSION(6, 0, 0):
10822dd871eSEric Huang 	case IP_VERSION(6, 0, 2):
1095ddb5fe9SDavid Belanger 	case IP_VERSION(6, 0, 3):
110cc009e61SMukul Joshi 		/* Reserve 1 for paging and 1 for gfx */
111cc009e61SMukul Joshi 		kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
112cc009e61SMukul Joshi 		/* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
113cc009e61SMukul Joshi 		kfd->device_info.reserved_sdma_queues_bitmap = 0xFULL;
114cc009e61SMukul Joshi 		break;
115e48e6a13SYifan Zhang 	case IP_VERSION(6, 0, 1):
116e48e6a13SYifan Zhang 		/* Reserve 1 for paging and 1 for gfx */
117e48e6a13SYifan Zhang 		kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
118e48e6a13SYifan Zhang 		/* BIT(0)=engine-0 queue-0; BIT(1)=engine-0 queue-1; ... */
119e48e6a13SYifan Zhang 		kfd->device_info.reserved_sdma_queues_bitmap = 0x3ULL;
120e48e6a13SYifan Zhang 		break;
121cc009e61SMukul Joshi 	default:
122cc009e61SMukul Joshi 		break;
123cc009e61SMukul Joshi 	}
124f89c6bf7SGuchun Chen }
125f89c6bf7SGuchun Chen 
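/*
 * Select the event interrupt handling callbacks matching the GC IP
 * generation: GFX9/GFX10 parts use the v9 event interrupt class, GFX11
 * parts use v11, and unknown versions fall back to v9 with a warning.
 */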
126f89c6bf7SGuchun Chen static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
127f89c6bf7SGuchun Chen {
128f89c6bf7SGuchun Chen 	uint32_t gc_version = KFD_GC_VERSION(kfd);
129f89c6bf7SGuchun Chen 
130f89c6bf7SGuchun Chen 	switch (gc_version) {
131f89c6bf7SGuchun Chen 	case IP_VERSION(9, 0, 1): /* VEGA10 */
132f89c6bf7SGuchun Chen 	case IP_VERSION(9, 1, 0): /* RAVEN */
133f89c6bf7SGuchun Chen 	case IP_VERSION(9, 2, 1): /* VEGA12 */
134f89c6bf7SGuchun Chen 	case IP_VERSION(9, 2, 2): /* RAVEN */
135f89c6bf7SGuchun Chen 	case IP_VERSION(9, 3, 0): /* RENOIR */
136f89c6bf7SGuchun Chen 	case IP_VERSION(9, 4, 0): /* VEGA20 */
137f89c6bf7SGuchun Chen 	case IP_VERSION(9, 4, 1): /* ARCTURUS */
138f89c6bf7SGuchun Chen 	case IP_VERSION(9, 4, 2): /* ALDEBARAN */
139f89c6bf7SGuchun Chen 	case IP_VERSION(10, 3, 1): /* VANGOGH */
140f89c6bf7SGuchun Chen 	case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
1417c4f4f19SMario Limonciello 	case IP_VERSION(10, 3, 6): /* GC 10.3.6 */
1427c4f4f19SMario Limonciello 	case IP_VERSION(10, 3, 7): /* GC 10.3.7 */
143f89c6bf7SGuchun Chen 	case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */
144f9ed188dSLang Yu 	case IP_VERSION(10, 1, 4):
145f89c6bf7SGuchun Chen 	case IP_VERSION(10, 1, 10): /* NAVI10 */
146f89c6bf7SGuchun Chen 	case IP_VERSION(10, 1, 2): /* NAVI12 */
147f89c6bf7SGuchun Chen 	case IP_VERSION(10, 1, 1): /* NAVI14 */
148f89c6bf7SGuchun Chen 	case IP_VERSION(10, 3, 0): /* SIENNA_CICHLID */
149f89c6bf7SGuchun Chen 	case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */
150f89c6bf7SGuchun Chen 	case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */
151f89c6bf7SGuchun Chen 	case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */
152f89c6bf7SGuchun Chen 		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
153f89c6bf7SGuchun Chen 		break;
154cc009e61SMukul Joshi 	case IP_VERSION(11, 0, 0):
15526776a70SHuang Rui 	case IP_VERSION(11, 0, 1):
156ec661f1cSEric Huang 	case IP_VERSION(11, 0, 2):
1575ddb5fe9SDavid Belanger 	case IP_VERSION(11, 0, 3):
15888c21c2bSYifan Zhang 	case IP_VERSION(11, 0, 4):
159cc009e61SMukul Joshi 		kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
160cc009e61SMukul Joshi 		break;
161f89c6bf7SGuchun Chen 	default:
162f89c6bf7SGuchun Chen 		dev_warn(kfd_device, "v9 event interrupt handler is set due to "
163f89c6bf7SGuchun Chen 			"unsupported GC IP version (GC_HWIP:0x%x).\n", gc_version);
164f89c6bf7SGuchun Chen 		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
165f89c6bf7SGuchun Chen 	}
166f89c6bf7SGuchun Chen }
167f89c6bf7SGuchun Chen 
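/*
 * Populate kfd->device_info for this GPU: common limits (PASID bits, HQDs,
 * watch points, MQD alignment), then either SOC15-specific settings
 * (doorbell/IH ring entry sizes, CWSR, SDMA and interrupt classes, PCIe
 * atomics and IOMMU requirements) or the legacy pre-SOC15 defaults.
 */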
168f0dc99a6SGraham Sider static void kfd_device_info_init(struct kfd_dev *kfd,
169f0dc99a6SGraham Sider 				 bool vf, uint32_t gfx_target_version)
170f0dc99a6SGraham Sider {
171f0dc99a6SGraham Sider 	uint32_t gc_version = KFD_GC_VERSION(kfd);
172f0dc99a6SGraham Sider 	uint32_t asic_type = kfd->adev->asic_type;
173f0dc99a6SGraham Sider 
174f0dc99a6SGraham Sider 	kfd->device_info.max_pasid_bits = 16;
175f0dc99a6SGraham Sider 	kfd->device_info.max_no_of_hqd = 24;
176f0dc99a6SGraham Sider 	kfd->device_info.num_of_watch_points = 4;
177f0dc99a6SGraham Sider 	kfd->device_info.mqd_size_aligned = MQD_SIZE_ALIGNED;
178f0dc99a6SGraham Sider 	kfd->device_info.gfx_target_version = gfx_target_version;
179f0dc99a6SGraham Sider 
180f0dc99a6SGraham Sider 	if (KFD_IS_SOC15(kfd)) {
181f0dc99a6SGraham Sider 		kfd->device_info.doorbell_size = 8;
182f0dc99a6SGraham Sider 		kfd->device_info.ih_ring_entry_size = 8 * sizeof(uint32_t);
183f0dc99a6SGraham Sider 		kfd->device_info.supports_cwsr = true;
184f0dc99a6SGraham Sider 
185cc009e61SMukul Joshi 		kfd_device_info_set_sdma_info(kfd);
186f89c6bf7SGuchun Chen 
187f89c6bf7SGuchun Chen 		kfd_device_info_set_event_interrupt_class(kfd);
188f0dc99a6SGraham Sider 
189f0dc99a6SGraham Sider 		/* Raven */
190f0dc99a6SGraham Sider 		if (gc_version == IP_VERSION(9, 1, 0) ||
191f0dc99a6SGraham Sider 		    gc_version == IP_VERSION(9, 2, 2))
192f0dc99a6SGraham Sider 			kfd->device_info.needs_iommu_device = true;
193f0dc99a6SGraham Sider 
194f0dc99a6SGraham Sider 		if (gc_version < IP_VERSION(11, 0, 0)) {
195f0dc99a6SGraham Sider 			/* Navi2x+, Navi1x+ */
19668e355c0SJesse Zhang 			if (gc_version == IP_VERSION(10, 3, 6))
19768e355c0SJesse Zhang 				kfd->device_info.no_atomic_fw_version = 14;
198c4e85551SPrike Liang 			else if (gc_version == IP_VERSION(10, 3, 7))
199c4e85551SPrike Liang 				kfd->device_info.no_atomic_fw_version = 3;
20068e355c0SJesse Zhang 			else if (gc_version >= IP_VERSION(10, 3, 0))
201f0dc99a6SGraham Sider 				kfd->device_info.no_atomic_fw_version = 92;
20227cc310fSchen gong 			else if (gc_version >= IP_VERSION(10, 1, 1))
20327cc310fSchen gong 				kfd->device_info.no_atomic_fw_version = 145;
204f0dc99a6SGraham Sider 
205f0dc99a6SGraham Sider 			/* Navi1x+ */
206f0dc99a6SGraham Sider 			if (gc_version >= IP_VERSION(10, 1, 1))
207f0dc99a6SGraham Sider 				kfd->device_info.needs_pci_atomics = true;
20800fa4035SSreekant Somasekharan 		} else if (gc_version < IP_VERSION(12, 0, 0)) {
20900fa4035SSreekant Somasekharan 			/*
21000fa4035SSreekant Somasekharan 			 * PCIe atomics support acknowledgment in GFX11 RS64 CPFW requires
21100fa4035SSreekant Somasekharan 			 * MEC version >= 509. Prior RS64 CPFW versions (and all F32) require
21200fa4035SSreekant Somasekharan 			 * PCIe atomics support.
21300fa4035SSreekant Somasekharan 			 */
21400fa4035SSreekant Somasekharan 			kfd->device_info.needs_pci_atomics = true;
21500fa4035SSreekant Somasekharan 			kfd->device_info.no_atomic_fw_version = kfd->adev->gfx.rs64_enable ? 509 : 0;
216f0dc99a6SGraham Sider 		}
217f0dc99a6SGraham Sider 	} else {
218f0dc99a6SGraham Sider 		kfd->device_info.doorbell_size = 4;
219f0dc99a6SGraham Sider 		kfd->device_info.ih_ring_entry_size = 4 * sizeof(uint32_t);
220f0dc99a6SGraham Sider 		kfd->device_info.event_interrupt_class = &event_interrupt_class_cik;
221f0dc99a6SGraham Sider 		kfd->device_info.num_sdma_queues_per_engine = 2;
222f0dc99a6SGraham Sider 
223f0dc99a6SGraham Sider 		if (asic_type != CHIP_KAVERI &&
224f0dc99a6SGraham Sider 		    asic_type != CHIP_HAWAII &&
225f0dc99a6SGraham Sider 		    asic_type != CHIP_TONGA)
226f0dc99a6SGraham Sider 			kfd->device_info.supports_cwsr = true;
227f0dc99a6SGraham Sider 
228f0dc99a6SGraham Sider 		if (asic_type == CHIP_KAVERI ||
229f0dc99a6SGraham Sider 		    asic_type == CHIP_CARRIZO)
230f0dc99a6SGraham Sider 			kfd->device_info.needs_iommu_device = true;
231f0dc99a6SGraham Sider 
232f0dc99a6SGraham Sider 		if (asic_type != CHIP_HAWAII && !vf)
233f0dc99a6SGraham Sider 			kfd->device_info.needs_pci_atomics = true;
234f0dc99a6SGraham Sider 	}
235f0dc99a6SGraham Sider }
236f0dc99a6SGraham Sider 
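/*
 * Map the probed GPU to a gfx_target_version (the ISA version reported to
 * user mode) and a kfd2kgd callback table; older ASICs are matched by
 * asic_type, newer ones by their GC IP version. Returning NULL means the
 * device is not supported by KFD.
 */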
237b5d1d755SGraham Sider struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
2384a488a7aSOded Gabbay {
239f0dc99a6SGraham Sider 	struct kfd_dev *kfd = NULL;
240f0dc99a6SGraham Sider 	const struct kfd2kgd_calls *f2g = NULL;
241f0dc99a6SGraham Sider 	uint32_t gfx_target_version = 0;
242050091abSYong Zhao 
243c868d584SAlex Deucher 	switch (adev->asic_type) {
244c868d584SAlex Deucher #ifdef KFD_SUPPORT_IOMMU_V2
245c868d584SAlex Deucher #ifdef CONFIG_DRM_AMDGPU_CIK
246c868d584SAlex Deucher 	case CHIP_KAVERI:
247f0dc99a6SGraham Sider 		gfx_target_version = 70000;
248f0dc99a6SGraham Sider 		if (!vf)
249c868d584SAlex Deucher 			f2g = &gfx_v7_kfd2kgd;
250c868d584SAlex Deucher 		break;
251c868d584SAlex Deucher #endif
252c868d584SAlex Deucher 	case CHIP_CARRIZO:
253f0dc99a6SGraham Sider 		gfx_target_version = 80001;
254f0dc99a6SGraham Sider 		if (!vf)
255c868d584SAlex Deucher 			f2g = &gfx_v8_kfd2kgd;
256c868d584SAlex Deucher 		break;
257c868d584SAlex Deucher #endif
258c868d584SAlex Deucher #ifdef CONFIG_DRM_AMDGPU_CIK
259c868d584SAlex Deucher 	case CHIP_HAWAII:
260f0dc99a6SGraham Sider 		gfx_target_version = 70001;
2610f7ef0b9SFelix Kuehling 		if (!amdgpu_exp_hw_support)
2620f7ef0b9SFelix Kuehling 			pr_info(
2630f7ef0b9SFelix Kuehling 	"KFD support on Hawaii is experimental. See modparam exp_hw_support\n"
2640f7ef0b9SFelix Kuehling 				);
2650f7ef0b9SFelix Kuehling 		else if (!vf)
266c868d584SAlex Deucher 			f2g = &gfx_v7_kfd2kgd;
267c868d584SAlex Deucher 		break;
268c868d584SAlex Deucher #endif
269c868d584SAlex Deucher 	case CHIP_TONGA:
270f0dc99a6SGraham Sider 		gfx_target_version = 80002;
271f0dc99a6SGraham Sider 		if (!vf)
272c868d584SAlex Deucher 			f2g = &gfx_v8_kfd2kgd;
273c868d584SAlex Deucher 		break;
274c868d584SAlex Deucher 	case CHIP_FIJI:
275c868d584SAlex Deucher 	case CHIP_POLARIS10:
276f0dc99a6SGraham Sider 		gfx_target_version = 80003;
277c868d584SAlex Deucher 		f2g = &gfx_v8_kfd2kgd;
278c868d584SAlex Deucher 		break;
279c868d584SAlex Deucher 	case CHIP_POLARIS11:
280c868d584SAlex Deucher 	case CHIP_POLARIS12:
281c868d584SAlex Deucher 	case CHIP_VEGAM:
282f0dc99a6SGraham Sider 		gfx_target_version = 80003;
283f0dc99a6SGraham Sider 		if (!vf)
284c868d584SAlex Deucher 			f2g = &gfx_v8_kfd2kgd;
285c868d584SAlex Deucher 		break;
286c868d584SAlex Deucher 	default:
287c868d584SAlex Deucher 		switch (adev->ip_versions[GC_HWIP][0]) {
2882c1f19b3SGraham Sider 		/* Vega 10 */
289c868d584SAlex Deucher 		case IP_VERSION(9, 0, 1):
290f0dc99a6SGraham Sider 			gfx_target_version = 90000;
291c868d584SAlex Deucher 			f2g = &gfx_v9_kfd2kgd;
292c868d584SAlex Deucher 			break;
293c868d584SAlex Deucher #ifdef KFD_SUPPORT_IOMMU_V2
2942c1f19b3SGraham Sider 		/* Raven */
295c868d584SAlex Deucher 		case IP_VERSION(9, 1, 0):
296c868d584SAlex Deucher 		case IP_VERSION(9, 2, 2):
297f0dc99a6SGraham Sider 			gfx_target_version = 90002;
298f0dc99a6SGraham Sider 			if (!vf)
299c868d584SAlex Deucher 				f2g = &gfx_v9_kfd2kgd;
300c868d584SAlex Deucher 			break;
301c868d584SAlex Deucher #endif
3022c1f19b3SGraham Sider 		/* Vega12 */
303c868d584SAlex Deucher 		case IP_VERSION(9, 2, 1):
304f0dc99a6SGraham Sider 			gfx_target_version = 90004;
305f0dc99a6SGraham Sider 			if (!vf)
306c868d584SAlex Deucher 				f2g = &gfx_v9_kfd2kgd;
307c868d584SAlex Deucher 			break;
3082c1f19b3SGraham Sider 		/* Renoir */
309c868d584SAlex Deucher 		case IP_VERSION(9, 3, 0):
310f0dc99a6SGraham Sider 			gfx_target_version = 90012;
311f0dc99a6SGraham Sider 			if (!vf)
312c868d584SAlex Deucher 				f2g = &gfx_v9_kfd2kgd;
313c868d584SAlex Deucher 			break;
3142c1f19b3SGraham Sider 		/* Vega20 */
315c868d584SAlex Deucher 		case IP_VERSION(9, 4, 0):
316f0dc99a6SGraham Sider 			gfx_target_version = 90006;
317f0dc99a6SGraham Sider 			if (!vf)
318c868d584SAlex Deucher 				f2g = &gfx_v9_kfd2kgd;
319c868d584SAlex Deucher 			break;
3202c1f19b3SGraham Sider 		/* Arcturus */
321c868d584SAlex Deucher 		case IP_VERSION(9, 4, 1):
322f0dc99a6SGraham Sider 			gfx_target_version = 90008;
323c868d584SAlex Deucher 			f2g = &arcturus_kfd2kgd;
324c868d584SAlex Deucher 			break;
3252c1f19b3SGraham Sider 		/* Aldebaran */
326c868d584SAlex Deucher 		case IP_VERSION(9, 4, 2):
327f0dc99a6SGraham Sider 			gfx_target_version = 90010;
328c868d584SAlex Deucher 			f2g = &aldebaran_kfd2kgd;
329c868d584SAlex Deucher 			break;
33070bdfedaSGraham Sider 		case IP_VERSION(9, 4, 3):
33170bdfedaSGraham Sider 			gfx_target_version = 90400;
332f544afacSAmber Lin 			f2g = &gc_9_4_3_kfd2kgd;
33370bdfedaSGraham Sider 			break;
3342c1f19b3SGraham Sider 		/* Navi10 */
335c868d584SAlex Deucher 		case IP_VERSION(10, 1, 10):
336f0dc99a6SGraham Sider 			gfx_target_version = 100100;
337f0dc99a6SGraham Sider 			if (!vf)
338c868d584SAlex Deucher 				f2g = &gfx_v10_kfd2kgd;
339c868d584SAlex Deucher 			break;
3402c1f19b3SGraham Sider 		/* Navi12 */
341c868d584SAlex Deucher 		case IP_VERSION(10, 1, 2):
342f0dc99a6SGraham Sider 			gfx_target_version = 100101;
343c868d584SAlex Deucher 			f2g = &gfx_v10_kfd2kgd;
344c868d584SAlex Deucher 			break;
3452c1f19b3SGraham Sider 		/* Navi14 */
346c868d584SAlex Deucher 		case IP_VERSION(10, 1, 1):
347f0dc99a6SGraham Sider 			gfx_target_version = 100102;
348f0dc99a6SGraham Sider 			if (!vf)
349c868d584SAlex Deucher 				f2g = &gfx_v10_kfd2kgd;
350c868d584SAlex Deucher 			break;
3512c1f19b3SGraham Sider 		/* Cyan Skillfish */
352c868d584SAlex Deucher 		case IP_VERSION(10, 1, 3):
353f9ed188dSLang Yu 		case IP_VERSION(10, 1, 4):
354f0dc99a6SGraham Sider 			gfx_target_version = 100103;
355f0dc99a6SGraham Sider 			if (!vf)
356c868d584SAlex Deucher 				f2g = &gfx_v10_kfd2kgd;
357c868d584SAlex Deucher 			break;
3582c1f19b3SGraham Sider 		/* Sienna Cichlid */
359c868d584SAlex Deucher 		case IP_VERSION(10, 3, 0):
360f0dc99a6SGraham Sider 			gfx_target_version = 100300;
361c868d584SAlex Deucher 			f2g = &gfx_v10_3_kfd2kgd;
362c868d584SAlex Deucher 			break;
3632c1f19b3SGraham Sider 		/* Navy Flounder */
364c868d584SAlex Deucher 		case IP_VERSION(10, 3, 2):
365f0dc99a6SGraham Sider 			gfx_target_version = 100301;
366c868d584SAlex Deucher 			f2g = &gfx_v10_3_kfd2kgd;
367c868d584SAlex Deucher 			break;
3682c1f19b3SGraham Sider 		/* Van Gogh */
369c868d584SAlex Deucher 		case IP_VERSION(10, 3, 1):
370f0dc99a6SGraham Sider 			gfx_target_version = 100303;
371f0dc99a6SGraham Sider 			if (!vf)
372c868d584SAlex Deucher 				f2g = &gfx_v10_3_kfd2kgd;
373c868d584SAlex Deucher 			break;
3742c1f19b3SGraham Sider 		/* Dimgrey Cavefish */
375c868d584SAlex Deucher 		case IP_VERSION(10, 3, 4):
376f0dc99a6SGraham Sider 			gfx_target_version = 100302;
377c868d584SAlex Deucher 			f2g = &gfx_v10_3_kfd2kgd;
378c868d584SAlex Deucher 			break;
3792c1f19b3SGraham Sider 		/* Beige Goby */
380c868d584SAlex Deucher 		case IP_VERSION(10, 3, 5):
381f0dc99a6SGraham Sider 			gfx_target_version = 100304;
382c868d584SAlex Deucher 			f2g = &gfx_v10_3_kfd2kgd;
383c868d584SAlex Deucher 			break;
3842c1f19b3SGraham Sider 		/* Yellow Carp */
385c868d584SAlex Deucher 		case IP_VERSION(10, 3, 3):
386f0dc99a6SGraham Sider 			gfx_target_version = 100305;
387f0dc99a6SGraham Sider 			if (!vf)
388c868d584SAlex Deucher 				f2g = &gfx_v10_3_kfd2kgd;
389c868d584SAlex Deucher 			break;
3907c4f4f19SMario Limonciello 		case IP_VERSION(10, 3, 6):
3917c4f4f19SMario Limonciello 		case IP_VERSION(10, 3, 7):
3922724efa3SPrike Liang 			gfx_target_version = 100306;
3937c4f4f19SMario Limonciello 			if (!vf)
3947c4f4f19SMario Limonciello 				f2g = &gfx_v10_3_kfd2kgd;
3957c4f4f19SMario Limonciello 			break;
396cc009e61SMukul Joshi 		case IP_VERSION(11, 0, 0):
397cc009e61SMukul Joshi 			gfx_target_version = 110000;
398cc009e61SMukul Joshi 			f2g = &gfx_v11_kfd2kgd;
399cc009e61SMukul Joshi 			break;
40026776a70SHuang Rui 		case IP_VERSION(11, 0, 1):
40188c21c2bSYifan Zhang 		case IP_VERSION(11, 0, 4):
40226776a70SHuang Rui 			gfx_target_version = 110003;
40326776a70SHuang Rui 			f2g = &gfx_v11_kfd2kgd;
40426776a70SHuang Rui 			break;
405ec661f1cSEric Huang 		case IP_VERSION(11, 0, 2):
406ec661f1cSEric Huang 			gfx_target_version = 110002;
407ec661f1cSEric Huang 			f2g = &gfx_v11_kfd2kgd;
408ec661f1cSEric Huang 			break;
4095ddb5fe9SDavid Belanger 		case IP_VERSION(11, 0, 3):
4105ddb5fe9SDavid Belanger 			/* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
4115ddb5fe9SDavid Belanger 			gfx_target_version = 110001;
4125ddb5fe9SDavid Belanger 			f2g = &gfx_v11_kfd2kgd;
4135ddb5fe9SDavid Belanger 			break;
414c868d584SAlex Deucher 		default:
415f0dc99a6SGraham Sider 			break;
416050091abSYong Zhao 		}
417c868d584SAlex Deucher 		break;
418c868d584SAlex Deucher 	}
4194a488a7aSOded Gabbay 
420f0dc99a6SGraham Sider 	if (!f2g) {
421e4804a39SGraham Sider 		if (adev->ip_versions[GC_HWIP][0])
422e4804a39SGraham Sider 			dev_err(kfd_device, "GC IP %06x %s not supported in kfd\n",
423e4804a39SGraham Sider 				adev->ip_versions[GC_HWIP][0], vf ? "VF" : "");
424e4804a39SGraham Sider 		else
425050091abSYong Zhao 			dev_err(kfd_device, "%s %s not supported in kfd\n",
426c868d584SAlex Deucher 				amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
4274a488a7aSOded Gabbay 		return NULL;
4284ebc7182SYong Zhao 	}
4294a488a7aSOded Gabbay 
430d35f00d8SEric Huang 	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
431d35f00d8SEric Huang 	if (!kfd)
432d35f00d8SEric Huang 		return NULL;
433d35f00d8SEric Huang 
434c6c57446SGraham Sider 	kfd->adev = adev;
435f0dc99a6SGraham Sider 	kfd_device_info_init(kfd, vf, gfx_target_version);
43619f6d2a6SOded Gabbay 	kfd->init_complete = false;
437cea405b1SXihan Zhang 	kfd->kfd2kgd = f2g;
43843d8107fSHarish Kasiviswanathan 	atomic_set(&kfd->compute_profile, 0);
439cea405b1SXihan Zhang 
440cea405b1SXihan Zhang 	mutex_init(&kfd->doorbell_mutex);
441cea405b1SXihan Zhang 	memset(&kfd->doorbell_available_index, 0,
442cea405b1SXihan Zhang 		sizeof(kfd->doorbell_available_index));
4434a488a7aSOded Gabbay 
44459d7115dSMukul Joshi 	ida_init(&kfd->doorbell_ida);
44559d7115dSMukul Joshi 
4464a488a7aSOded Gabbay 	return kfd;
4474a488a7aSOded Gabbay }
4484a488a7aSOded Gabbay 
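/*
 * Select the CWSR (compute wave save/restore) trap handler matching the GC
 * IP version. Each handler binary must fit in a single page, which the
 * BUILD_BUG_ON checks below enforce.
 */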
449373d7080SFelix Kuehling static void kfd_cwsr_init(struct kfd_dev *kfd)
450373d7080SFelix Kuehling {
451f0dc99a6SGraham Sider 	if (cwsr_enable && kfd->device_info.supports_cwsr) {
452046e674bSGraham Sider 		if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) {
453373d7080SFelix Kuehling 			BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
454373d7080SFelix Kuehling 			kfd->cwsr_isa = cwsr_trap_gfx8_hex;
455373d7080SFelix Kuehling 			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
456046e674bSGraham Sider 		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) {
4573baa24f0SOak Zeng 			BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE);
4583baa24f0SOak Zeng 			kfd->cwsr_isa = cwsr_trap_arcturus_hex;
4593baa24f0SOak Zeng 			kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
460046e674bSGraham Sider 		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) {
4610ef6845cSJay Cornwall 			BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) > PAGE_SIZE);
4620ef6845cSJay Cornwall 			kfd->cwsr_isa = cwsr_trap_aldebaran_hex;
4630ef6845cSJay Cornwall 			kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex);
464828d9a87SHawking Zhang 		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3)) {
4651d44ff3dSJay Cornwall 			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_4_3_hex) > PAGE_SIZE);
4661d44ff3dSJay Cornwall 			kfd->cwsr_isa = cwsr_trap_gfx9_4_3_hex;
4671d44ff3dSJay Cornwall 			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_4_3_hex);
468046e674bSGraham Sider 		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) {
4693e76c239SFelix Kuehling 			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
4703e76c239SFelix Kuehling 			kfd->cwsr_isa = cwsr_trap_gfx9_hex;
4713e76c239SFelix Kuehling 			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
472046e674bSGraham Sider 		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) {
47380b6cfedSJay Cornwall 			BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE);
47480b6cfedSJay Cornwall 			kfd->cwsr_isa = cwsr_trap_nv1x_hex;
47580b6cfedSJay Cornwall 			kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
4766a817038SJay Cornwall 		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)) {
47714328aa5SPhilip Cox 			BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex) > PAGE_SIZE);
47814328aa5SPhilip Cox 			kfd->cwsr_isa = cwsr_trap_gfx10_hex;
47914328aa5SPhilip Cox 			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex);
4806a817038SJay Cornwall 		} else {
4816a817038SJay Cornwall 			BUILD_BUG_ON(sizeof(cwsr_trap_gfx11_hex) > PAGE_SIZE);
4826a817038SJay Cornwall 			kfd->cwsr_isa = cwsr_trap_gfx11_hex;
4836a817038SJay Cornwall 			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex);
4843e76c239SFelix Kuehling 		}
4853e76c239SFelix Kuehling 
486373d7080SFelix Kuehling 		kfd->cwsr_enabled = true;
487373d7080SFelix Kuehling 	}
488373d7080SFelix Kuehling }
489373d7080SFelix Kuehling 
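/*
 * GWS (global wave sync) is allocated only when the HW scheduler is in use
 * and either the hws_gws_support module parameter is set or the MEC2
 * firmware is new enough for this GC IP version.
 */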
490*8dc1db31SMukul Joshi static int kfd_gws_init(struct kfd_node *node)
49129633d0eSJoseph Greathouse {
49229633d0eSJoseph Greathouse 	int ret = 0;
493*8dc1db31SMukul Joshi 	struct kfd_dev *kfd = node->kfd;
49429633d0eSJoseph Greathouse 
495*8dc1db31SMukul Joshi 	if (node->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
49629633d0eSJoseph Greathouse 		return 0;
49729633d0eSJoseph Greathouse 
498*8dc1db31SMukul Joshi 	if (hws_gws_support || (KFD_IS_SOC15(node) &&
499*8dc1db31SMukul Joshi 		((KFD_GC_VERSION(node) == IP_VERSION(9, 0, 1)
500046e674bSGraham Sider 			&& kfd->mec2_fw_version >= 0x81b3) ||
501*8dc1db31SMukul Joshi 		(KFD_GC_VERSION(node) <= IP_VERSION(9, 4, 0)
502046e674bSGraham Sider 			&& kfd->mec2_fw_version >= 0x1b3)  ||
503*8dc1db31SMukul Joshi 		(KFD_GC_VERSION(node) == IP_VERSION(9, 4, 1)
504046e674bSGraham Sider 			&& kfd->mec2_fw_version >= 0x30)   ||
505*8dc1db31SMukul Joshi 		(KFD_GC_VERSION(node) == IP_VERSION(9, 4, 2)
506beb15bc1SJonathan Kim 			&& kfd->mec2_fw_version >= 0x28) ||
507*8dc1db31SMukul Joshi 		(KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0)
508*8dc1db31SMukul Joshi 			&& KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0)
509beb15bc1SJonathan Kim 			&& kfd->mec2_fw_version >= 0x6b))))
510*8dc1db31SMukul Joshi 		ret = amdgpu_amdkfd_alloc_gws(node->adev,
511*8dc1db31SMukul Joshi 				node->adev->gds.gws_size, &node->gws);
51229633d0eSJoseph Greathouse 
51329633d0eSJoseph Greathouse 	return ret;
51429633d0eSJoseph Greathouse }
51529633d0eSJoseph Greathouse 
516*8dc1db31SMukul Joshi static void kfd_smi_init(struct kfd_node *dev)
5172243f493SRajneesh Bhardwaj {
518938a0650SAmber Lin 	INIT_LIST_HEAD(&dev->smi_clients);
519938a0650SAmber Lin 	spin_lock_init(&dev->smi_lock);
520938a0650SAmber Lin }
521938a0650SAmber Lin 
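/*
 * Per-node initialization: interrupts, device queue manager, GWS, HW resume
 * and topology registration. On failure the node memory is freed here, so
 * the caller must not free it again.
 */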
522*8dc1db31SMukul Joshi static int kfd_init_node(struct kfd_node *node)
523*8dc1db31SMukul Joshi {
524*8dc1db31SMukul Joshi 	int err = -1;
525*8dc1db31SMukul Joshi 
526*8dc1db31SMukul Joshi 	if (kfd_interrupt_init(node)) {
527*8dc1db31SMukul Joshi 		dev_err(kfd_device, "Error initializing interrupts\n");
528*8dc1db31SMukul Joshi 		goto kfd_interrupt_error;
529*8dc1db31SMukul Joshi 	}
530*8dc1db31SMukul Joshi 
531*8dc1db31SMukul Joshi 	node->dqm = device_queue_manager_init(node);
532*8dc1db31SMukul Joshi 	if (!node->dqm) {
533*8dc1db31SMukul Joshi 		dev_err(kfd_device, "Error initializing queue manager\n");
534*8dc1db31SMukul Joshi 		goto device_queue_manager_error;
535*8dc1db31SMukul Joshi 	}
536*8dc1db31SMukul Joshi 
537*8dc1db31SMukul Joshi 	if (kfd_gws_init(node)) {
538*8dc1db31SMukul Joshi 		dev_err(kfd_device, "Could not allocate %d gws\n",
539*8dc1db31SMukul Joshi 			node->adev->gds.gws_size);
540*8dc1db31SMukul Joshi 		goto gws_error;
541*8dc1db31SMukul Joshi 	}
542*8dc1db31SMukul Joshi 
543*8dc1db31SMukul Joshi 	if (kfd_resume(node))
544*8dc1db31SMukul Joshi 		goto kfd_resume_error;
545*8dc1db31SMukul Joshi 
546*8dc1db31SMukul Joshi 	if (kfd_topology_add_device(node)) {
547*8dc1db31SMukul Joshi 		dev_err(kfd_device, "Error adding device to topology\n");
548*8dc1db31SMukul Joshi 		goto kfd_topology_add_device_error;
549*8dc1db31SMukul Joshi 	}
550*8dc1db31SMukul Joshi 
551*8dc1db31SMukul Joshi 	kfd_smi_init(node);
552*8dc1db31SMukul Joshi 
553*8dc1db31SMukul Joshi 	return 0;
554*8dc1db31SMukul Joshi 
555*8dc1db31SMukul Joshi kfd_topology_add_device_error:
556*8dc1db31SMukul Joshi kfd_resume_error:
557*8dc1db31SMukul Joshi gws_error:
558*8dc1db31SMukul Joshi 	device_queue_manager_uninit(node->dqm);
559*8dc1db31SMukul Joshi device_queue_manager_error:
560*8dc1db31SMukul Joshi 	kfd_interrupt_exit(node);
561*8dc1db31SMukul Joshi kfd_interrupt_error:
562*8dc1db31SMukul Joshi 	if (node->gws)
563*8dc1db31SMukul Joshi 		amdgpu_amdkfd_free_gws(node->adev, node->gws);
564*8dc1db31SMukul Joshi 
565*8dc1db31SMukul Joshi 	/* Cleanup the node memory here */
566*8dc1db31SMukul Joshi 	kfree(node);
567*8dc1db31SMukul Joshi 	return err;
568*8dc1db31SMukul Joshi }
569*8dc1db31SMukul Joshi 
570*8dc1db31SMukul Joshi static void kfd_cleanup_node(struct kfd_dev *kfd)
571*8dc1db31SMukul Joshi {
572*8dc1db31SMukul Joshi 	struct kfd_node *knode = kfd->node;
573*8dc1db31SMukul Joshi 
574*8dc1db31SMukul Joshi 	device_queue_manager_uninit(knode->dqm);
575*8dc1db31SMukul Joshi 	kfd_interrupt_exit(knode);
576*8dc1db31SMukul Joshi 	kfd_topology_remove_device(knode);
577*8dc1db31SMukul Joshi 	if (knode->gws)
578*8dc1db31SMukul Joshi 		amdgpu_amdkfd_free_gws(knode->adev, knode->gws);
579*8dc1db31SMukul Joshi 	kfree(knode);
580*8dc1db31SMukul Joshi 	kfd->node = NULL;
581*8dc1db31SMukul Joshi }
582*8dc1db31SMukul Joshi 
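/*
 * Shared (per-adapter) initialization: read firmware versions, size and
 * allocate the GTT region used for MQDs, runlist packets and HIQ/DIQ, set
 * up doorbells, the GTT sub-allocator, IOMMU and CWSR, then allocate and
 * initialize the kfd_node.
 */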
5834a488a7aSOded Gabbay bool kgd2kfd_device_init(struct kfd_dev *kfd,
5844a488a7aSOded Gabbay 			 const struct kgd2kfd_shared_resources *gpu_resources)
5854a488a7aSOded Gabbay {
586fd6a440eSJonathan Kim 	unsigned int size, map_process_packet_size;
587*8dc1db31SMukul Joshi 	struct kfd_node *node;
588*8dc1db31SMukul Joshi 	uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd;
589*8dc1db31SMukul Joshi 	unsigned int max_proc_per_quantum;
59019f6d2a6SOded Gabbay 
591574c4183SGraham Sider 	kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
5925ade6c9cSFelix Kuehling 			KGD_ENGINE_MEC1);
593574c4183SGraham Sider 	kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
59429633d0eSJoseph Greathouse 			KGD_ENGINE_MEC2);
595574c4183SGraham Sider 	kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
5965ade6c9cSFelix Kuehling 			KGD_ENGINE_SDMA1);
5974a488a7aSOded Gabbay 	kfd->shared_resources = *gpu_resources;
5984a488a7aSOded Gabbay 
599*8dc1db31SMukul Joshi 	first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
600*8dc1db31SMukul Joshi 	last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
601*8dc1db31SMukul Joshi 	vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1;
60244008d7aSYong Zhao 
603e312af6cSFelix Kuehling 	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
604e312af6cSFelix Kuehling 	 * 32 and 64-bit requests are possible and must be
605e312af6cSFelix Kuehling 	 * supported.
606e312af6cSFelix Kuehling 	 */
6076bfc7c7eSGraham Sider 	kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev);
608e312af6cSFelix Kuehling 	if (!kfd->pci_atomic_requested &&
609f0dc99a6SGraham Sider 	    kfd->device_info.needs_pci_atomics &&
610f0dc99a6SGraham Sider 	    (!kfd->device_info.no_atomic_fw_version ||
611f0dc99a6SGraham Sider 	     kfd->mec_fw_version < kfd->device_info.no_atomic_fw_version)) {
612e312af6cSFelix Kuehling 		dev_info(kfd_device,
613e312af6cSFelix Kuehling 			 "skipped device %x:%x, PCI rejects atomics %d<%d\n",
614d69a3b76SMukul Joshi 			 kfd->adev->pdev->vendor, kfd->adev->pdev->device,
615e312af6cSFelix Kuehling 			 kfd->mec_fw_version,
616f0dc99a6SGraham Sider 			 kfd->device_info.no_atomic_fw_version);
617e312af6cSFelix Kuehling 		return false;
618e312af6cSFelix Kuehling 	}
619e312af6cSFelix Kuehling 
620a99c6d4fSFelix Kuehling 	/* Verify module parameters regarding the number of mapped processes */
621b7dfbd2eSTushar Patel 	if (hws_max_conc_proc >= 0)
622*8dc1db31SMukul Joshi 		max_proc_per_quantum = min((u32)hws_max_conc_proc, vmid_num_kfd);
623b7dfbd2eSTushar Patel 	else
624*8dc1db31SMukul Joshi 		max_proc_per_quantum = vmid_num_kfd;
625a99c6d4fSFelix Kuehling 
62619f6d2a6SOded Gabbay 	/* calculate max size of mqds needed for queues */
627b8cbab04SOded Gabbay 	size = max_num_of_queues_per_device *
628f0dc99a6SGraham Sider 			kfd->device_info.mqd_size_aligned;
62919f6d2a6SOded Gabbay 
630e18e794eSOded Gabbay 	/*
631e18e794eSOded Gabbay 	 * calculate max size of runlist packet.
632e18e794eSOded Gabbay 	 * There can be only 2 packets at once
633e18e794eSOded Gabbay 	 */
634046e674bSGraham Sider 	map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ?
635fd6a440eSJonathan Kim 				sizeof(struct pm4_mes_map_process_aldebaran) :
636fd6a440eSJonathan Kim 				sizeof(struct pm4_mes_map_process);
637fd6a440eSJonathan Kim 	size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size +
638507968ddSFelix Kuehling 		max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
639507968ddSFelix Kuehling 		+ sizeof(struct pm4_mes_runlist)) * 2;
640e18e794eSOded Gabbay 
641e18e794eSOded Gabbay 	/* Add size of HIQ & DIQ */
642e18e794eSOded Gabbay 	size += KFD_KERNEL_QUEUE_SIZE * 2;
643e18e794eSOded Gabbay 
644e18e794eSOded Gabbay 	/* add another 512KB for all other allocations on gart (HPD, fences) */
64519f6d2a6SOded Gabbay 	size += 512 * 1024;
64619f6d2a6SOded Gabbay 
6477cd52c91SAmber Lin 	if (amdgpu_amdkfd_alloc_gtt_mem(
6486bfc7c7eSGraham Sider 			kfd->adev, size, &kfd->gtt_mem,
64915426dbbSYong Zhao 			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
65015426dbbSYong Zhao 			false)) {
65179775b62SKent Russell 		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
652e09d4fc8SOak Zeng 		goto alloc_gtt_mem_failure;
65319f6d2a6SOded Gabbay 	}
65419f6d2a6SOded Gabbay 
65579775b62SKent Russell 	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);
656e18e794eSOded Gabbay 
65773a1da0bSOded Gabbay 	/* Initialize GTT sa with 512 byte chunk size */
65873a1da0bSOded Gabbay 	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
65979775b62SKent Russell 		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
66073a1da0bSOded Gabbay 		goto kfd_gtt_sa_init_error;
66173a1da0bSOded Gabbay 	}
66273a1da0bSOded Gabbay 
663735df2baSFelix Kuehling 	if (kfd_doorbell_init(kfd)) {
664735df2baSFelix Kuehling 		dev_err(kfd_device,
665735df2baSFelix Kuehling 			"Error initializing doorbell aperture\n");
666735df2baSFelix Kuehling 		goto kfd_doorbell_error;
667735df2baSFelix Kuehling 	}
66819f6d2a6SOded Gabbay 
669c5650327SDivya Shikre 	if (amdgpu_use_xgmi_p2p)
67002274fc0SGraham Sider 		kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;
6710c1690e3SShaoyun Liu 
67202274fc0SGraham Sider 	kfd->noretry = kfd->adev->gmc.noretry;
6739b498efaSAlex Deucher 
6746127896fSHuang Rui 	/* If CRAT is broken, iommu support won't be enabled */
6756127896fSHuang Rui 	kfd_double_confirm_iommu_support(kfd);
6766127896fSHuang Rui 
67764d1c3a4SFelix Kuehling 	if (kfd_iommu_device_init(kfd)) {
6786f4b590aSYifan Zhang 		kfd->use_iommu_v2 = false;
67964d1c3a4SFelix Kuehling 		dev_err(kfd_device, "Error initializing iommuv2\n");
68064d1c3a4SFelix Kuehling 		goto device_iommu_error;
68164c7f8cfSBen Goz 	}
68264c7f8cfSBen Goz 
683373d7080SFelix Kuehling 	kfd_cwsr_init(kfd);
684373d7080SFelix Kuehling 
68556c5977eSGraham Sider 	svm_migrate_init(kfd->adev);
686814ab993SPhilip Yang 
687*8dc1db31SMukul Joshi 	/* Allocate the KFD node */
688*8dc1db31SMukul Joshi 	node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL);
689*8dc1db31SMukul Joshi 	if (!node) {
690*8dc1db31SMukul Joshi 		dev_err(kfd_device, "Error allocating KFD node\n");
691*8dc1db31SMukul Joshi 		goto node_alloc_error;
692465ab9e0SOak Zeng 	}
693465ab9e0SOak Zeng 
694*8dc1db31SMukul Joshi 	node->adev = kfd->adev;
695*8dc1db31SMukul Joshi 	node->kfd = kfd;
696*8dc1db31SMukul Joshi 	node->kfd2kgd = kfd->kfd2kgd;
697*8dc1db31SMukul Joshi 	node->vm_info.vmid_num_kfd = vmid_num_kfd;
698*8dc1db31SMukul Joshi 	node->vm_info.first_vmid_kfd = first_vmid_kfd;
699*8dc1db31SMukul Joshi 	node->vm_info.last_vmid_kfd = last_vmid_kfd;
700*8dc1db31SMukul Joshi 	node->max_proc_per_quantum = max_proc_per_quantum;
701*8dc1db31SMukul Joshi 	atomic_set(&node->sram_ecc_flag, 0);
702*8dc1db31SMukul Joshi 
703*8dc1db31SMukul Joshi 	/* Initialize the KFD node */
704*8dc1db31SMukul Joshi 	if (kfd_init_node(node)) {
705*8dc1db31SMukul Joshi 		dev_err(kfd_device, "Error initializing KFD node\n");
706*8dc1db31SMukul Joshi 		goto node_init_error;
707*8dc1db31SMukul Joshi 	}
708*8dc1db31SMukul Joshi 	kfd->node = node;
709*8dc1db31SMukul Joshi 
710*8dc1db31SMukul Joshi 	if (kfd_resume_iommu(kfd))
711*8dc1db31SMukul Joshi 		goto kfd_resume_iommu_error;
712*8dc1db31SMukul Joshi 
713*8dc1db31SMukul Joshi 	amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info);
714938a0650SAmber Lin 
7154a488a7aSOded Gabbay 	kfd->init_complete = true;
716d69a3b76SMukul Joshi 	dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor,
717d69a3b76SMukul Joshi 		 kfd->adev->pdev->device);
7184a488a7aSOded Gabbay 
71979775b62SKent Russell 	pr_debug("Starting kfd with the following scheduling policy %d\n",
720*8dc1db31SMukul Joshi 		node->dqm->sched_policy);
72164c7f8cfSBen Goz 
72219f6d2a6SOded Gabbay 	goto out;
72319f6d2a6SOded Gabbay 
724*8dc1db31SMukul Joshi kfd_resume_iommu_error:
725*8dc1db31SMukul Joshi 	kfd_cleanup_node(kfd);
726*8dc1db31SMukul Joshi node_init_error:
727*8dc1db31SMukul Joshi node_alloc_error:
72864d1c3a4SFelix Kuehling device_iommu_error:
729735df2baSFelix Kuehling 	kfd_doorbell_fini(kfd);
730735df2baSFelix Kuehling kfd_doorbell_error:
73173a1da0bSOded Gabbay 	kfd_gtt_sa_fini(kfd);
73273a1da0bSOded Gabbay kfd_gtt_sa_init_error:
7336bfc7c7eSGraham Sider 	amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
734e09d4fc8SOak Zeng alloc_gtt_mem_failure:
73519f6d2a6SOded Gabbay 	dev_err(kfd_device,
73679775b62SKent Russell 		"device %x:%x NOT added due to errors\n",
737d69a3b76SMukul Joshi 		kfd->adev->pdev->vendor, kfd->adev->pdev->device);
73819f6d2a6SOded Gabbay out:
73919f6d2a6SOded Gabbay 	return kfd->init_complete;
7404a488a7aSOded Gabbay }
7414a488a7aSOded Gabbay 
7424a488a7aSOded Gabbay void kgd2kfd_device_exit(struct kfd_dev *kfd)
7434a488a7aSOded Gabbay {
744b17f068aSOded Gabbay 	if (kfd->init_complete) {
745*8dc1db31SMukul Joshi 		kfd_cleanup_node(kfd);
746735df2baSFelix Kuehling 		kfd_doorbell_fini(kfd);
74759d7115dSMukul Joshi 		ida_destroy(&kfd->doorbell_ida);
74873a1da0bSOded Gabbay 		kfd_gtt_sa_fini(kfd);
7496bfc7c7eSGraham Sider 		amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
750b17f068aSOded Gabbay 	}
7515b5c4e40SEvgeny Pinchuk 
7524a488a7aSOded Gabbay 	kfree(kfd);
7534a488a7aSOded Gabbay }
7544a488a7aSOded Gabbay 
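/*
 * Called before a GPU reset: emit an SMI event, quiesce the queue manager,
 * suspend all KFD processes (via kgd2kfd_suspend) and signal reset events
 * to user-mode processes.
 */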
755e3b7a967SShaoyun Liu int kgd2kfd_pre_reset(struct kfd_dev *kfd)
756e3b7a967SShaoyun Liu {
757*8dc1db31SMukul Joshi 	struct kfd_node *node = kfd->node;
758*8dc1db31SMukul Joshi 
759e42051d2SShaoyun Liu 	if (!kfd->init_complete)
760e42051d2SShaoyun Liu 		return 0;
76109c34e8dSFelix Kuehling 
762*8dc1db31SMukul Joshi 	kfd_smi_event_update_gpu_reset(node, false);
76355977744SMukul Joshi 
764*8dc1db31SMukul Joshi 	node->dqm->ops.pre_reset(node->dqm);
76509c34e8dSFelix Kuehling 
7669593f4d6SRajneesh Bhardwaj 	kgd2kfd_suspend(kfd, false);
767e42051d2SShaoyun Liu 
768*8dc1db31SMukul Joshi 	kfd_signal_reset_event(node);
769e3b7a967SShaoyun Liu 	return 0;
770e3b7a967SShaoyun Liu }
771e3b7a967SShaoyun Liu 
772e42051d2SShaoyun Liu /*
773e42051d2SShaoyun Liu  * FIXME: KFD won't be able to resume existing processes for now.
774e42051d2SShaoyun Liu  * We will keep all existing processes in an evicted state and
775e42051d2SShaoyun Liu  * wait for them to be terminated.
776e42051d2SShaoyun Liu  */
777e42051d2SShaoyun Liu 
778e3b7a967SShaoyun Liu int kgd2kfd_post_reset(struct kfd_dev *kfd)
779e3b7a967SShaoyun Liu {
780a1bd079fSyu kuai 	int ret;
781*8dc1db31SMukul Joshi 	struct kfd_node *node = kfd->node;
782e42051d2SShaoyun Liu 
783e42051d2SShaoyun Liu 	if (!kfd->init_complete)
784e3b7a967SShaoyun Liu 		return 0;
785e42051d2SShaoyun Liu 
786*8dc1db31SMukul Joshi 	ret = kfd_resume(node);
787e42051d2SShaoyun Liu 	if (ret)
788e42051d2SShaoyun Liu 		return ret;
789a1bd079fSyu kuai 	atomic_dec(&kfd_locked);
7909b54d201SEric Huang 
791*8dc1db31SMukul Joshi 	atomic_set(&node->sram_ecc_flag, 0);
7929b54d201SEric Huang 
793*8dc1db31SMukul Joshi 	kfd_smi_event_update_gpu_reset(node, true);
79455977744SMukul Joshi 
795e42051d2SShaoyun Liu 	return 0;
796e42051d2SShaoyun Liu }
797e42051d2SShaoyun Liu 
798e42051d2SShaoyun Liu bool kfd_is_locked(void)
799e42051d2SShaoyun Liu {
800e42051d2SShaoyun Liu 	return  (atomic_read(&kfd_locked) > 0);
801e3b7a967SShaoyun Liu }
802e3b7a967SShaoyun Liu 
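/*
 * For a system suspend or reset (run_pm == false) the first device to be
 * suspended evicts all KFD processes via the global kfd_locked count;
 * runtime suspend only stops this device's queue manager and IOMMU.
 */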
8039593f4d6SRajneesh Bhardwaj void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
8044a488a7aSOded Gabbay {
805*8dc1db31SMukul Joshi 	struct kfd_node *node = kfd->node;
806*8dc1db31SMukul Joshi 
807733fa1f7SYong Zhao 	if (!kfd->init_complete)
808733fa1f7SYong Zhao 		return;
809733fa1f7SYong Zhao 
8109593f4d6SRajneesh Bhardwaj 	/* for runtime suspend, skip locking kfd */
8119593f4d6SRajneesh Bhardwaj 	if (!run_pm) {
81226103436SFelix Kuehling 		/* For first KFD device suspend all the KFD processes */
813e42051d2SShaoyun Liu 		if (atomic_inc_return(&kfd_locked) == 1)
81426103436SFelix Kuehling 			kfd_suspend_all_processes();
8159593f4d6SRajneesh Bhardwaj 	}
81626103436SFelix Kuehling 
817*8dc1db31SMukul Joshi 	node->dqm->ops.stop(node->dqm);
81864d1c3a4SFelix Kuehling 	kfd_iommu_suspend(kfd);
8194a488a7aSOded Gabbay }
8204a488a7aSOded Gabbay 
8219593f4d6SRajneesh Bhardwaj int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
8224a488a7aSOded Gabbay {
82326103436SFelix Kuehling 	int ret, count;
824*8dc1db31SMukul Joshi 	struct kfd_node *node = kfd->node;
82526103436SFelix Kuehling 
826b8935a7cSYong Zhao 	if (!kfd->init_complete)
827b8935a7cSYong Zhao 		return 0;
828b17f068aSOded Gabbay 
829*8dc1db31SMukul Joshi 	ret = kfd_resume(node);
83026103436SFelix Kuehling 	if (ret)
83126103436SFelix Kuehling 		return ret;
832b17f068aSOded Gabbay 
8339593f4d6SRajneesh Bhardwaj 	/* for runtime resume, skip unlocking kfd */
8349593f4d6SRajneesh Bhardwaj 	if (!run_pm) {
835e42051d2SShaoyun Liu 		count = atomic_dec_return(&kfd_locked);
83626103436SFelix Kuehling 		WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
83726103436SFelix Kuehling 		if (count == 0)
83826103436SFelix Kuehling 			ret = kfd_resume_all_processes();
8399593f4d6SRajneesh Bhardwaj 	}
84026103436SFelix Kuehling 
84126103436SFelix Kuehling 	return ret;
8424ebc7182SYong Zhao }
8434ebc7182SYong Zhao 
844f8846323SJames Zhu int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
845b8935a7cSYong Zhao {
8467ee938acSFelix Kuehling 	if (!kfd->init_complete)
8477ee938acSFelix Kuehling 		return 0;
8487ee938acSFelix Kuehling 
8497ee938acSFelix Kuehling 	return kfd_resume_iommu(kfd);
8507ee938acSFelix Kuehling }
8517ee938acSFelix Kuehling 
8527ee938acSFelix Kuehling static int kfd_resume_iommu(struct kfd_dev *kfd)
8537ee938acSFelix Kuehling {
854b8935a7cSYong Zhao 	int err = 0;
855b8935a7cSYong Zhao 
85664d1c3a4SFelix Kuehling 	err = kfd_iommu_resume(kfd);
857f8846323SJames Zhu 	if (err)
85864d1c3a4SFelix Kuehling 		dev_err(kfd_device,
85964d1c3a4SFelix Kuehling 			"Failed to resume IOMMU for device %x:%x\n",
860d69a3b76SMukul Joshi 			kfd->adev->pdev->vendor, kfd->adev->pdev->device);
86164d1c3a4SFelix Kuehling 	return err;
86264d1c3a4SFelix Kuehling }
863733fa1f7SYong Zhao 
864*8dc1db31SMukul Joshi static int kfd_resume(struct kfd_node *node)
865f8846323SJames Zhu {
866f8846323SJames Zhu 	int err = 0;
867f8846323SJames Zhu 
868*8dc1db31SMukul Joshi 	err = node->dqm->ops.start(node->dqm);
869499f4d38SYifan Zhang 	if (err)
870b8935a7cSYong Zhao 		dev_err(kfd_device,
871b8935a7cSYong Zhao 			"Error starting queue manager for device %x:%x\n",
872*8dc1db31SMukul Joshi 			node->adev->pdev->vendor, node->adev->pdev->device);
873b17f068aSOded Gabbay 
874b8935a7cSYong Zhao 	return err;
8754a488a7aSOded Gabbay }
8764a488a7aSOded Gabbay 
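/*
 * Queue interrupt work on a CPU that shares the NUMA node of the CPU
 * handling the interrupt; if no such online CPU is found the search wraps
 * back to the current CPU.
 */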
877b3eca59dSPhilip Yang static inline void kfd_queue_work(struct workqueue_struct *wq,
878b3eca59dSPhilip Yang 				  struct work_struct *work)
879b3eca59dSPhilip Yang {
880b3eca59dSPhilip Yang 	int cpu, new_cpu;
881b3eca59dSPhilip Yang 
882b3eca59dSPhilip Yang 	cpu = new_cpu = smp_processor_id();
883b3eca59dSPhilip Yang 	do {
884b3eca59dSPhilip Yang 		new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
885b3eca59dSPhilip Yang 		if (cpu_to_node(new_cpu) == numa_node_id())
886b3eca59dSPhilip Yang 			break;
887b3eca59dSPhilip Yang 	} while (cpu != new_cpu);
888b3eca59dSPhilip Yang 
889b3eca59dSPhilip Yang 	queue_work_on(new_cpu, wq, work);
890b3eca59dSPhilip Yang }
891b3eca59dSPhilip Yang 
892b3f5e6b4SAndrew Lewycky /* This is called directly from KGD at ISR. */
893b3f5e6b4SAndrew Lewycky void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
8944a488a7aSOded Gabbay {
89558e69886SLan Xiao 	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
89658e69886SLan Xiao 	bool is_patched = false;
8972383a767SChristian König 	unsigned long flags;
898*8dc1db31SMukul Joshi 	struct kfd_node *node = kfd->node;
89958e69886SLan Xiao 
9002249d558SAndrew Lewycky 	if (!kfd->init_complete)
9012249d558SAndrew Lewycky 		return;
9022249d558SAndrew Lewycky 
903f0dc99a6SGraham Sider 	if (kfd->device_info.ih_ring_entry_size > sizeof(patched_ihre)) {
90458e69886SLan Xiao 		dev_err_once(kfd_device, "Ring entry too small\n");
90558e69886SLan Xiao 		return;
90658e69886SLan Xiao 	}
90758e69886SLan Xiao 
908*8dc1db31SMukul Joshi 	spin_lock_irqsave(&node->interrupt_lock, flags);
9092249d558SAndrew Lewycky 
910*8dc1db31SMukul Joshi 	if (node->interrupts_active
911*8dc1db31SMukul Joshi 	    && interrupt_is_wanted(node, ih_ring_entry,
91258e69886SLan Xiao 				   patched_ihre, &is_patched)
913*8dc1db31SMukul Joshi 	    && enqueue_ih_ring_entry(node,
91458e69886SLan Xiao 				     is_patched ? patched_ihre : ih_ring_entry))
915*8dc1db31SMukul Joshi 		kfd_queue_work(node->ih_wq, &node->interrupt_work);
9162249d558SAndrew Lewycky 
917*8dc1db31SMukul Joshi 	spin_unlock_irqrestore(&node->interrupt_lock, flags);
9184a488a7aSOded Gabbay }
9196e81090bSOded Gabbay 
920c7f21978SPhilip Yang int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger)
9216b95e797SFelix Kuehling {
9226b95e797SFelix Kuehling 	struct kfd_process *p;
9236b95e797SFelix Kuehling 	int r;
9246b95e797SFelix Kuehling 
9256b95e797SFelix Kuehling 	/* Because we are called from arbitrary context (workqueue) as opposed
9266b95e797SFelix Kuehling 	 * to process context, kfd_process could attempt to exit while we are
9276b95e797SFelix Kuehling 	 * running so the lookup function increments the process ref count.
9286b95e797SFelix Kuehling 	 */
9296b95e797SFelix Kuehling 	p = kfd_lookup_process_by_mm(mm);
9306b95e797SFelix Kuehling 	if (!p)
9316b95e797SFelix Kuehling 		return -ESRCH;
9326b95e797SFelix Kuehling 
933b2057956SFelix Kuehling 	WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
934c7f21978SPhilip Yang 	r = kfd_process_evict_queues(p, trigger);
9356b95e797SFelix Kuehling 
9366b95e797SFelix Kuehling 	kfd_unref_process(p);
9376b95e797SFelix Kuehling 	return r;
9386b95e797SFelix Kuehling }
9396b95e797SFelix Kuehling 
9406b95e797SFelix Kuehling int kgd2kfd_resume_mm(struct mm_struct *mm)
9416b95e797SFelix Kuehling {
9426b95e797SFelix Kuehling 	struct kfd_process *p;
9436b95e797SFelix Kuehling 	int r;
9446b95e797SFelix Kuehling 
9456b95e797SFelix Kuehling 	/* Because we are called from arbitrary context (workqueue) as opposed
9466b95e797SFelix Kuehling 	 * to process context, kfd_process could attempt to exit while we are
9476b95e797SFelix Kuehling 	 * running so the lookup function increments the process ref count.
9486b95e797SFelix Kuehling 	 */
9496b95e797SFelix Kuehling 	p = kfd_lookup_process_by_mm(mm);
9506b95e797SFelix Kuehling 	if (!p)
9516b95e797SFelix Kuehling 		return -ESRCH;
9526b95e797SFelix Kuehling 
9536b95e797SFelix Kuehling 	r = kfd_process_restore_queues(p);
9546b95e797SFelix Kuehling 
9556b95e797SFelix Kuehling 	kfd_unref_process(p);
9566b95e797SFelix Kuehling 	return r;
9576b95e797SFelix Kuehling }
9586b95e797SFelix Kuehling 
95926103436SFelix Kuehling /** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
96026103436SFelix Kuehling  *   prepare for safe eviction of KFD BOs that belong to the specified
96126103436SFelix Kuehling  *   process.
96226103436SFelix Kuehling  *
96326103436SFelix Kuehling  * @mm: mm_struct that identifies the specified KFD process
96426103436SFelix Kuehling  * @fence: eviction fence attached to KFD process BOs
96526103436SFelix Kuehling  *
96626103436SFelix Kuehling  */
96726103436SFelix Kuehling int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
96826103436SFelix Kuehling 					       struct dma_fence *fence)
96926103436SFelix Kuehling {
97026103436SFelix Kuehling 	struct kfd_process *p;
97126103436SFelix Kuehling 	unsigned long active_time;
97226103436SFelix Kuehling 	unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);
97326103436SFelix Kuehling 
97426103436SFelix Kuehling 	if (!fence)
97526103436SFelix Kuehling 		return -EINVAL;
97626103436SFelix Kuehling 
97726103436SFelix Kuehling 	if (dma_fence_is_signaled(fence))
97826103436SFelix Kuehling 		return 0;
97926103436SFelix Kuehling 
98026103436SFelix Kuehling 	p = kfd_lookup_process_by_mm(mm);
98126103436SFelix Kuehling 	if (!p)
98226103436SFelix Kuehling 		return -ENODEV;
98326103436SFelix Kuehling 
98426103436SFelix Kuehling 	if (fence->seqno == p->last_eviction_seqno)
98526103436SFelix Kuehling 		goto out;
98626103436SFelix Kuehling 
98726103436SFelix Kuehling 	p->last_eviction_seqno = fence->seqno;
98826103436SFelix Kuehling 
98926103436SFelix Kuehling 	/* Avoid KFD process starvation. Wait for at least
99026103436SFelix Kuehling 	 * PROCESS_ACTIVE_TIME_MS before evicting the process again
99126103436SFelix Kuehling 	 */
99226103436SFelix Kuehling 	active_time = get_jiffies_64() - p->last_restore_timestamp;
99326103436SFelix Kuehling 	if (delay_jiffies > active_time)
99426103436SFelix Kuehling 		delay_jiffies -= active_time;
99526103436SFelix Kuehling 	else
99626103436SFelix Kuehling 		delay_jiffies = 0;
99726103436SFelix Kuehling 
99826103436SFelix Kuehling 	/* During process initialization eviction_work.dwork is initialized
99926103436SFelix Kuehling 	 * to kfd_evict_bo_worker
100026103436SFelix Kuehling 	 */
1001b2057956SFelix Kuehling 	WARN(debug_evictions, "Scheduling eviction of pid %d in %ld jiffies",
1002b2057956SFelix Kuehling 	     p->lead_thread->pid, delay_jiffies);
100326103436SFelix Kuehling 	schedule_delayed_work(&p->eviction_work, delay_jiffies);
100426103436SFelix Kuehling out:
100526103436SFelix Kuehling 	kfd_unref_process(p);
100626103436SFelix Kuehling 	return 0;
100726103436SFelix Kuehling }
100826103436SFelix Kuehling 
10096e81090bSOded Gabbay static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
10106e81090bSOded Gabbay 				unsigned int chunk_size)
10116e81090bSOded Gabbay {
101232fa8219SFelix Kuehling 	if (WARN_ON(buf_size < chunk_size))
101332fa8219SFelix Kuehling 		return -EINVAL;
101432fa8219SFelix Kuehling 	if (WARN_ON(buf_size == 0))
101532fa8219SFelix Kuehling 		return -EINVAL;
101632fa8219SFelix Kuehling 	if (WARN_ON(chunk_size == 0))
101732fa8219SFelix Kuehling 		return -EINVAL;
10186e81090bSOded Gabbay 
10196e81090bSOded Gabbay 	kfd->gtt_sa_chunk_size = chunk_size;
10206e81090bSOded Gabbay 	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;
10216e81090bSOded Gabbay 
1022f43a9f18SChristophe JAILLET 	kfd->gtt_sa_bitmap = bitmap_zalloc(kfd->gtt_sa_num_of_chunks,
1023f43a9f18SChristophe JAILLET 					   GFP_KERNEL);
10246e81090bSOded Gabbay 	if (!kfd->gtt_sa_bitmap)
10256e81090bSOded Gabbay 		return -ENOMEM;
10266e81090bSOded Gabbay 
102779775b62SKent Russell 	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
10286e81090bSOded Gabbay 			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);
10296e81090bSOded Gabbay 
10306e81090bSOded Gabbay 	mutex_init(&kfd->gtt_sa_lock);
10316e81090bSOded Gabbay 
10326e81090bSOded Gabbay 	return 0;
10336e81090bSOded Gabbay }
10346e81090bSOded Gabbay 
10356e81090bSOded Gabbay static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
10366e81090bSOded Gabbay {
10376e81090bSOded Gabbay 	mutex_destroy(&kfd->gtt_sa_lock);
1038f43a9f18SChristophe JAILLET 	bitmap_free(kfd->gtt_sa_bitmap);
10396e81090bSOded Gabbay }
10406e81090bSOded Gabbay 
10416e81090bSOded Gabbay static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
10426e81090bSOded Gabbay 						unsigned int bit_num,
10436e81090bSOded Gabbay 						unsigned int chunk_size)
10446e81090bSOded Gabbay {
10456e81090bSOded Gabbay 	return start_addr + bit_num * chunk_size;
10466e81090bSOded Gabbay }
10476e81090bSOded Gabbay 
10486e81090bSOded Gabbay static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
10496e81090bSOded Gabbay 						unsigned int bit_num,
10506e81090bSOded Gabbay 						unsigned int chunk_size)
10516e81090bSOded Gabbay {
10526e81090bSOded Gabbay 	return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
10536e81090bSOded Gabbay }
10546e81090bSOded Gabbay 
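/*
 * First-fit allocation from the GTT sub-allocator: scan the chunk bitmap
 * for a run of free chunks large enough for the requested size, restarting
 * the search whenever a run turns out not to be contiguous, then mark the
 * winning range as allocated.
 */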
1055*8dc1db31SMukul Joshi int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size,
10566e81090bSOded Gabbay 			struct kfd_mem_obj **mem_obj)
10576e81090bSOded Gabbay {
10586e81090bSOded Gabbay 	unsigned int found, start_search, cur_size;
1059*8dc1db31SMukul Joshi 	struct kfd_dev *kfd = node->kfd;
10606e81090bSOded Gabbay 
10616e81090bSOded Gabbay 	if (size == 0)
10626e81090bSOded Gabbay 		return -EINVAL;
10636e81090bSOded Gabbay 
10646e81090bSOded Gabbay 	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
10656e81090bSOded Gabbay 		return -ENOMEM;
10666e81090bSOded Gabbay 
10671cd106ecSFelix Kuehling 	*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
10681cd106ecSFelix Kuehling 	if (!(*mem_obj))
10696e81090bSOded Gabbay 		return -ENOMEM;
10706e81090bSOded Gabbay 
107179775b62SKent Russell 	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);
10726e81090bSOded Gabbay 
10736e81090bSOded Gabbay 	start_search = 0;
10746e81090bSOded Gabbay 
10756e81090bSOded Gabbay 	mutex_lock(&kfd->gtt_sa_lock);
10766e81090bSOded Gabbay 
10776e81090bSOded Gabbay kfd_gtt_restart_search:
10786e81090bSOded Gabbay 	/* Find the first chunk that is free */
10796e81090bSOded Gabbay 	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
10806e81090bSOded Gabbay 					kfd->gtt_sa_num_of_chunks,
10816e81090bSOded Gabbay 					start_search);
10826e81090bSOded Gabbay 
108379775b62SKent Russell 	pr_debug("Found = %d\n", found);
10846e81090bSOded Gabbay 
10856e81090bSOded Gabbay 	/* If there wasn't any free chunk, bail out */
10866e81090bSOded Gabbay 	if (found == kfd->gtt_sa_num_of_chunks)
10876e81090bSOded Gabbay 		goto kfd_gtt_no_free_chunk;
10886e81090bSOded Gabbay 
10896e81090bSOded Gabbay 	/* Update fields of mem_obj */
10906e81090bSOded Gabbay 	(*mem_obj)->range_start = found;
10916e81090bSOded Gabbay 	(*mem_obj)->range_end = found;
10926e81090bSOded Gabbay 	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
10936e81090bSOded Gabbay 					kfd->gtt_start_gpu_addr,
10946e81090bSOded Gabbay 					found,
10956e81090bSOded Gabbay 					kfd->gtt_sa_chunk_size);
10966e81090bSOded Gabbay 	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
10976e81090bSOded Gabbay 					kfd->gtt_start_cpu_ptr,
10986e81090bSOded Gabbay 					found,
10996e81090bSOded Gabbay 					kfd->gtt_sa_chunk_size);
11006e81090bSOded Gabbay 
110179775b62SKent Russell 	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
11026e81090bSOded Gabbay 			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);
11036e81090bSOded Gabbay 
11046e81090bSOded Gabbay 	/* If we need only one chunk, mark it as allocated and get out */
11056e81090bSOded Gabbay 	if (size <= kfd->gtt_sa_chunk_size) {
110679775b62SKent Russell 		pr_debug("Single bit\n");
1107b8b9ba58SChristophe JAILLET 		__set_bit(found, kfd->gtt_sa_bitmap);
11086e81090bSOded Gabbay 		goto kfd_gtt_out;
11096e81090bSOded Gabbay 	}
11106e81090bSOded Gabbay 
11116e81090bSOded Gabbay 	/* Otherwise, try to see if we have enough contiguous chunks */
11126e81090bSOded Gabbay 	cur_size = size - kfd->gtt_sa_chunk_size;
11136e81090bSOded Gabbay 	do {
11146e81090bSOded Gabbay 		(*mem_obj)->range_end =
11156e81090bSOded Gabbay 			find_next_zero_bit(kfd->gtt_sa_bitmap,
11166e81090bSOded Gabbay 					kfd->gtt_sa_num_of_chunks, ++found);
11176e81090bSOded Gabbay 		/*
11186e81090bSOded Gabbay 		 * If the next free chunk is not contiguous, we need to
11196e81090bSOded Gabbay 		 * restart our search from the last free chunk we found (which
11206e81090bSOded Gabbay 		 * wasn't contiguous to the previous ones).
11216e81090bSOded Gabbay 		 */
11226e81090bSOded Gabbay 		if ((*mem_obj)->range_end != found) {
11236e81090bSOded Gabbay 			start_search = found;
11246e81090bSOded Gabbay 			goto kfd_gtt_restart_search;
11256e81090bSOded Gabbay 		}
11266e81090bSOded Gabbay 
11276e81090bSOded Gabbay 		/*
11286e81090bSOded Gabbay 		 * If we reached the end of the buffer, bail out with an error
11296e81090bSOded Gabbay 		 */
11306e81090bSOded Gabbay 		if (found == kfd->gtt_sa_num_of_chunks)
11316e81090bSOded Gabbay 			goto kfd_gtt_no_free_chunk;
11326e81090bSOded Gabbay 
11336e81090bSOded Gabbay 		/* Check if we don't need another chunk */
11346e81090bSOded Gabbay 		if (cur_size <= kfd->gtt_sa_chunk_size)
11356e81090bSOded Gabbay 			cur_size = 0;
11366e81090bSOded Gabbay 		else
11376e81090bSOded Gabbay 			cur_size -= kfd->gtt_sa_chunk_size;
11386e81090bSOded Gabbay 
11396e81090bSOded Gabbay 	} while (cur_size > 0);
11406e81090bSOded Gabbay 
114179775b62SKent Russell 	pr_debug("range_start = %d, range_end = %d\n",
11426e81090bSOded Gabbay 		(*mem_obj)->range_start, (*mem_obj)->range_end);
11436e81090bSOded Gabbay 
11446e81090bSOded Gabbay 	/* Mark the chunks as allocated */
1145b8b9ba58SChristophe JAILLET 	bitmap_set(kfd->gtt_sa_bitmap, (*mem_obj)->range_start,
1146b8b9ba58SChristophe JAILLET 		   (*mem_obj)->range_end - (*mem_obj)->range_start + 1);
11476e81090bSOded Gabbay 
11486e81090bSOded Gabbay kfd_gtt_out:
11496e81090bSOded Gabbay 	mutex_unlock(&kfd->gtt_sa_lock);
11506e81090bSOded Gabbay 	return 0;
11516e81090bSOded Gabbay 
11526e81090bSOded Gabbay kfd_gtt_no_free_chunk:
11533148a6a0SJack Zhang 	pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
11546e81090bSOded Gabbay 	mutex_unlock(&kfd->gtt_sa_lock);
11553148a6a0SJack Zhang 	kfree(*mem_obj);
11566e81090bSOded Gabbay 	return -ENOMEM;
11576e81090bSOded Gabbay }
11586e81090bSOded Gabbay 
1159*8dc1db31SMukul Joshi int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj)
11606e81090bSOded Gabbay {
1161*8dc1db31SMukul Joshi 	struct kfd_dev *kfd = node->kfd;
1162*8dc1db31SMukul Joshi 
11639216ed29SOded Gabbay 	/* Act like kfree when trying to free a NULL object */
11649216ed29SOded Gabbay 	if (!mem_obj)
11659216ed29SOded Gabbay 		return 0;
11666e81090bSOded Gabbay 
116779775b62SKent Russell 	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
11686e81090bSOded Gabbay 			mem_obj, mem_obj->range_start, mem_obj->range_end);
11696e81090bSOded Gabbay 
11706e81090bSOded Gabbay 	mutex_lock(&kfd->gtt_sa_lock);
11716e81090bSOded Gabbay 
11726e81090bSOded Gabbay 	/* Mark the chunks as free */
1173b8b9ba58SChristophe JAILLET 	bitmap_clear(kfd->gtt_sa_bitmap, mem_obj->range_start,
1174b8b9ba58SChristophe JAILLET 		     mem_obj->range_end - mem_obj->range_start + 1);
11756e81090bSOded Gabbay 
11766e81090bSOded Gabbay 	mutex_unlock(&kfd->gtt_sa_lock);
11776e81090bSOded Gabbay 
11786e81090bSOded Gabbay 	kfree(mem_obj);
11796e81090bSOded Gabbay 	return 0;
11806e81090bSOded Gabbay }
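
/*
 * Minimal usage sketch for the allocate/free pair above (illustrative only;
 * the node pointer and the 256-byte size are hypothetical):
 *
 *	struct kfd_mem_obj *mem_obj;
 *
 *	if (kfd_gtt_sa_allocate(node, 256, &mem_obj))
 *		return -ENOMEM;
 *	... write packets through mem_obj->cpu_ptr and hand
 *	    mem_obj->gpu_addr to the hardware ...
 *	kfd_gtt_sa_free(node, mem_obj);
 */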
1181a29ec470SShaoyun Liu 
11829b54d201SEric Huang void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
11839b54d201SEric Huang {
11849b54d201SEric Huang 	if (kfd)
1185*8dc1db31SMukul Joshi 		atomic_inc(&kfd->node->sram_ecc_flag);
11869b54d201SEric Huang }
11879b54d201SEric Huang 
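/*
 * compute_profile is a reference count: the first increment below switches
 * amdgpu out of the compute-idle state and the last decrement switches it
 * back; a negative count indicates unbalanced inc/dec calls.
 */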
1188*8dc1db31SMukul Joshi void kfd_inc_compute_active(struct kfd_node *node)
118943d8107fSHarish Kasiviswanathan {
1190*8dc1db31SMukul Joshi 	if (atomic_inc_return(&node->kfd->compute_profile) == 1)
1191*8dc1db31SMukul Joshi 		amdgpu_amdkfd_set_compute_idle(node->adev, false);
119243d8107fSHarish Kasiviswanathan }
119343d8107fSHarish Kasiviswanathan 
1194*8dc1db31SMukul Joshi void kfd_dec_compute_active(struct kfd_node *node)
119543d8107fSHarish Kasiviswanathan {
1196*8dc1db31SMukul Joshi 	int count = atomic_dec_return(&node->kfd->compute_profile);
119743d8107fSHarish Kasiviswanathan 
119843d8107fSHarish Kasiviswanathan 	if (count == 0)
1199*8dc1db31SMukul Joshi 		amdgpu_amdkfd_set_compute_idle(node->adev, true);
120043d8107fSHarish Kasiviswanathan 	WARN_ONCE(count < 0, "Compute profile ref. count error");
120143d8107fSHarish Kasiviswanathan }
120243d8107fSHarish Kasiviswanathan 
1203410e302eSGraham Sider void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
12042c2b0d88SMukul Joshi {
1205158fc08dSAmber Lin 	if (kfd && kfd->init_complete)
1206*8dc1db31SMukul Joshi 		kfd_smi_event_update_thermal_throttling(kfd->node, throttle_bitmask);
12072c2b0d88SMukul Joshi }
12082c2b0d88SMukul Joshi 
1209ee2f17f4SAmber Lin /* kfd_get_num_sdma_engines returns the number of PCIe-optimized SDMA engines and
1210ee2f17f4SAmber Lin  * kfd_get_num_xgmi_sdma_engines returns the number of XGMI SDMA engines.
1211ee2f17f4SAmber Lin  * When the device has more than two engines, we reserve two for PCIe to enable
1212ee2f17f4SAmber Lin  * full-duplex operation and use the rest as XGMI.
1213ee2f17f4SAmber Lin  */
1214*8dc1db31SMukul Joshi unsigned int kfd_get_num_sdma_engines(struct kfd_node *node)
1215ee2f17f4SAmber Lin {
1216ee2f17f4SAmber Lin 	/* If XGMI is not supported, all SDMA engines are PCIe */
1217*8dc1db31SMukul Joshi 	if (!node->adev->gmc.xgmi.supported)
1218*8dc1db31SMukul Joshi 		return node->adev->sdma.num_instances;
1219ee2f17f4SAmber Lin 
1220*8dc1db31SMukul Joshi 	return min(node->adev->sdma.num_instances, 2);
1221ee2f17f4SAmber Lin }
1222ee2f17f4SAmber Lin 
1223*8dc1db31SMukul Joshi unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node)
1224ee2f17f4SAmber Lin {
1225ee2f17f4SAmber Lin 	/* After reserving engines for PCIe, the rest are XGMI */
1226*8dc1db31SMukul Joshi 	return node->adev->sdma.num_instances - kfd_get_num_sdma_engines(node);
1227ee2f17f4SAmber Lin }
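
/*
 * Example split implied by the two helpers above (counts are illustrative):
 * a device with 8 SDMA instances and XGMI support reports 2 PCIe-optimized
 * engines and 6 XGMI engines; without XGMI support all 8 are PCIe-optimized.
 */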
1228ee2f17f4SAmber Lin 
1229a29ec470SShaoyun Liu #if defined(CONFIG_DEBUG_FS)
1230a29ec470SShaoyun Liu 
1231a29ec470SShaoyun Liu /* This function sends a packet to the HIQ to hang the HWS,
1232a29ec470SShaoyun Liu  * which will trigger a GPU reset and bring the HWS back to a normal state
1233a29ec470SShaoyun Liu  */
1234*8dc1db31SMukul Joshi int kfd_debugfs_hang_hws(struct kfd_node *dev)
1235a29ec470SShaoyun Liu {
1236a29ec470SShaoyun Liu 	if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
1237a29ec470SShaoyun Liu 		pr_err("HWS is not enabled\n");
1238a29ec470SShaoyun Liu 		return -EINVAL;
1239a29ec470SShaoyun Liu 	}
1240a29ec470SShaoyun Liu 
12414f942aaeSOak Zeng 	return dqm_debugfs_hang_hws(dev->dqm);
1242a29ec470SShaoyun Liu }
1243a29ec470SShaoyun Liu 
1244a29ec470SShaoyun Liu #endif
1245