/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_headers_aldebaran.h"
#include "cwsr_trap_handler.h"
#include "kfd_iommu.h"
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
#include "kfd_migrate.h"
#include "amdgpu.h"

#define MQD_SIZE_ALIGNED 768

/*
 * kfd_locked is used to lock the kfd driver during suspend or reset.
 * Once locked, the kfd driver will stop any further GPU execution and
 * process creation (open) will return -EAGAIN.
 */
static atomic_t kfd_locked = ATOMIC_INIT(0);

#ifdef CONFIG_DRM_AMDGPU_CIK
extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
#endif
extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
extern const struct kfd2kgd_calls arcturus_kfd2kgd;
extern const struct kfd2kgd_calls aldebaran_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd;

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);

static int kfd_resume(struct kfd_dev *kfd);

static void kfd_device_info_init(struct kfd_dev *kfd,
				 bool vf, uint32_t gfx_target_version)
{
	uint32_t gc_version = KFD_GC_VERSION(kfd);
	uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0];
	uint32_t asic_type = kfd->adev->asic_type;

	kfd->device_info.max_pasid_bits = 16;
	kfd->device_info.max_no_of_hqd = 24;
	kfd->device_info.num_of_watch_points = 4;
	kfd->device_info.mqd_size_aligned = MQD_SIZE_ALIGNED;
	kfd->device_info.gfx_target_version = gfx_target_version;

	if (KFD_IS_SOC15(kfd)) {
		kfd->device_info.doorbell_size = 8;
		kfd->device_info.ih_ring_entry_size = 8 * sizeof(uint32_t);
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
		kfd->device_info.supports_cwsr = true;

		if ((sdma_version >= IP_VERSION(4, 0, 0) &&
		     sdma_version <= IP_VERSION(4, 2, 0)) ||
		     sdma_version == IP_VERSION(5, 2, 1) ||
		     sdma_version == IP_VERSION(5, 2, 3))
			kfd->device_info.num_sdma_queues_per_engine = 2;
		else
			kfd->device_info.num_sdma_queues_per_engine = 8;

		/* Raven */
		if (gc_version == IP_VERSION(9, 1, 0) ||
		    gc_version == IP_VERSION(9, 2, 2))
			kfd->device_info.needs_iommu_device = true;

		if (gc_version < IP_VERSION(11, 0, 0)) {
			/* Navi2x+, Navi1x+ */
			if (gc_version >= IP_VERSION(10, 3, 0))
				kfd->device_info.no_atomic_fw_version = 92;
			else if (gc_version >= IP_VERSION(10, 1, 1))
				kfd->device_info.no_atomic_fw_version = 145;

			/* Navi1x+ */
			if (gc_version >= IP_VERSION(10, 1, 1))
				kfd->device_info.needs_pci_atomics = true;
		}
	} else {
		kfd->device_info.doorbell_size = 4;
		kfd->device_info.ih_ring_entry_size = 4 * sizeof(uint32_t);
		kfd->device_info.event_interrupt_class = &event_interrupt_class_cik;
		kfd->device_info.num_sdma_queues_per_engine = 2;

		if (asic_type != CHIP_KAVERI &&
		    asic_type != CHIP_HAWAII &&
		    asic_type != CHIP_TONGA)
			kfd->device_info.supports_cwsr = true;

		if (asic_type == CHIP_KAVERI ||
		    asic_type == CHIP_CARRIZO)
			kfd->device_info.needs_iommu_device = true;

		if (asic_type != CHIP_HAWAII && !vf)
			kfd->device_info.needs_pci_atomics = true;
	}
}

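/*
 * kgd2kfd_probe - first stage of KFD device creation. Maps the ASIC type
 * (or, for SOC15 and later parts, the GC IP version) to a gfx target
 * version and a kfd2kgd callback table, then allocates and minimally
 * initializes the kfd_dev. Returns NULL for unsupported devices, which
 * includes virtual functions (vf) of ASICs without SR-IOV compute support.
 */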
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
{
	struct kfd_dev *kfd = NULL;
	const struct kfd2kgd_calls *f2g = NULL;
	struct pci_dev *pdev = adev->pdev;
	uint32_t gfx_target_version = 0;

	switch (adev->asic_type) {
#ifdef KFD_SUPPORT_IOMMU_V2
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
		gfx_target_version = 70000;
		if (!vf)
			f2g = &gfx_v7_kfd2kgd;
		break;
#endif
	case CHIP_CARRIZO:
		gfx_target_version = 80001;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_HAWAII:
		gfx_target_version = 70001;
		if (!amdgpu_exp_hw_support)
			pr_info("KFD support on Hawaii is experimental. See modparam exp_hw_support\n");
		else if (!vf)
			f2g = &gfx_v7_kfd2kgd;
		break;
#endif
	case CHIP_TONGA:
		gfx_target_version = 80002;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_FIJI:
		gfx_target_version = 80003;
		f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_POLARIS10:
		gfx_target_version = 80003;
		f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_POLARIS11:
		gfx_target_version = 80003;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_POLARIS12:
		gfx_target_version = 80003;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_VEGAM:
		gfx_target_version = 80003;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
	default:
		switch (adev->ip_versions[GC_HWIP][0]) {
		/* Vega10 */
		case IP_VERSION(9, 0, 1):
			gfx_target_version = 90000;
			f2g = &gfx_v9_kfd2kgd;
			break;
#ifdef KFD_SUPPORT_IOMMU_V2
		/* Raven */
		case IP_VERSION(9, 1, 0):
		case IP_VERSION(9, 2, 2):
			gfx_target_version = 90002;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
#endif
		/* Vega12 */
		case IP_VERSION(9, 2, 1):
			gfx_target_version = 90004;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
		/* Renoir */
		case IP_VERSION(9, 3, 0):
			gfx_target_version = 90012;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
		/* Vega20 */
		case IP_VERSION(9, 4, 0):
			gfx_target_version = 90006;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
		/* Arcturus */
		case IP_VERSION(9, 4, 1):
			gfx_target_version = 90008;
			f2g = &arcturus_kfd2kgd;
			break;
		/* Aldebaran */
		case IP_VERSION(9, 4, 2):
			gfx_target_version = 90010;
			f2g = &aldebaran_kfd2kgd;
			break;
		/* Navi10 */
		case IP_VERSION(10, 1, 10):
			gfx_target_version = 100100;
			if (!vf)
				f2g = &gfx_v10_kfd2kgd;
			break;
		/* Navi12 */
		case IP_VERSION(10, 1, 2):
			gfx_target_version = 100101;
			f2g = &gfx_v10_kfd2kgd;
			break;
		/* Navi14 */
		case IP_VERSION(10, 1, 1):
			gfx_target_version = 100102;
			if (!vf)
				f2g = &gfx_v10_kfd2kgd;
			break;
		/* Cyan Skillfish */
		case IP_VERSION(10, 1, 3):
			gfx_target_version = 100103;
			if (!vf)
				f2g = &gfx_v10_kfd2kgd;
			break;
		/* Sienna Cichlid */
		case IP_VERSION(10, 3, 0):
			gfx_target_version = 100300;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Navy Flounder */
		case IP_VERSION(10, 3, 2):
			gfx_target_version = 100301;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Van Gogh */
		case IP_VERSION(10, 3, 1):
			gfx_target_version = 100303;
			if (!vf)
				f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Dimgrey Cavefish */
		case IP_VERSION(10, 3, 4):
			gfx_target_version = 100302;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Beige Goby */
		case IP_VERSION(10, 3, 5):
			gfx_target_version = 100304;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Yellow Carp */
		case IP_VERSION(10, 3, 3):
			gfx_target_version = 100305;
			if (!vf)
				f2g = &gfx_v10_3_kfd2kgd;
			break;
		default:
			break;
		}
		break;
	}

	if (!f2g) {
		if (adev->ip_versions[GC_HWIP][0])
			dev_err(kfd_device, "GC IP %06x %s not supported in kfd\n",
				adev->ip_versions[GC_HWIP][0], vf ? "VF" : "");
		else
			dev_err(kfd_device, "%s %s not supported in kfd\n",
				amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
		return NULL;
	}

	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
	if (!kfd)
		return NULL;

	kfd->adev = adev;
	kfd_device_info_init(kfd, vf, gfx_target_version);
	kfd->pdev = pdev;
	kfd->init_complete = false;
	kfd->kfd2kgd = f2g;
	atomic_set(&kfd->compute_profile, 0);

	mutex_init(&kfd->doorbell_mutex);
	memset(&kfd->doorbell_available_index, 0,
		sizeof(kfd->doorbell_available_index));

	atomic_set(&kfd->sram_ecc_flag, 0);

	ida_init(&kfd->doorbell_ida);

	return kfd;
}

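/*
 * kfd_cwsr_init - select the CWSR (compute wave save/restore) trap handler
 * that matches this GPU's GC IP version. Each trap handler binary must fit
 * in a single page, which is enforced by the BUILD_BUG_ON checks below.
 */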
static void kfd_cwsr_init(struct kfd_dev *kfd)
{
	if (cwsr_enable && kfd->device_info.supports_cwsr) {
		if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx8_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_arcturus_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_aldebaran_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex);
		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx9_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_nv1x_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
		} else {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx10_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex);
		}

		kfd->cwsr_enabled = true;
	}
}

static int kfd_gws_init(struct kfd_dev *kfd)
{
	int ret = 0;

	if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
		return 0;

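	/*
	 * GWS needs HWS scheduling and a new enough MEC2 firmware. Allocate
	 * the device's GWS resources if support is forced via the
	 * hws_gws_support module parameter, or if this SOC15 GPU's MEC2
	 * firmware meets the per-ASIC minimum version checked below.
	 */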
	if (hws_gws_support || (KFD_IS_SOC15(kfd) &&
		((KFD_GC_VERSION(kfd) == IP_VERSION(9, 0, 1)
			&& kfd->mec2_fw_version >= 0x81b3) ||
		(KFD_GC_VERSION(kfd) <= IP_VERSION(9, 4, 0)
			&& kfd->mec2_fw_version >= 0x1b3) ||
		(KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)
			&& kfd->mec2_fw_version >= 0x30) ||
		(KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)
			&& kfd->mec2_fw_version >= 0x28))))
		ret = amdgpu_amdkfd_alloc_gws(kfd->adev,
				kfd->adev->gds.gws_size, &kfd->gws);

	return ret;
}

static void kfd_smi_init(struct kfd_dev *dev)
{
	INIT_LIST_HEAD(&dev->smi_clients);
	spin_lock_init(&dev->smi_lock);
}

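/*
 * kgd2kfd_device_init - second stage of KFD device creation. Reads firmware
 * versions, checks PCIe atomics support, sizes and allocates the GTT region
 * used for MQDs, runlists and kernel queues, and brings up the doorbell
 * aperture, interrupts, device queue manager, GWS, IOMMU, CWSR and topology.
 * Returns true only if every step completed.
 */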
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 struct drm_device *ddev,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	unsigned int size, map_process_packet_size;

	kfd->ddev = ddev;
	kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_MEC1);
	kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_MEC2);
	kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_SDMA1);
	kfd->shared_resources = *gpu_resources;

	kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
			- kfd->vm_info.first_vmid_kfd + 1;

	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
	 * 32 and 64-bit requests are possible and must be
	 * supported.
	 */
	kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev);
	if (!kfd->pci_atomic_requested &&
	    kfd->device_info.needs_pci_atomics &&
	    (!kfd->device_info.no_atomic_fw_version ||
	     kfd->mec_fw_version < kfd->device_info.no_atomic_fw_version)) {
		dev_info(kfd_device,
			 "skipped device %x:%x, PCI rejects atomics %d<%d\n",
			 kfd->pdev->vendor, kfd->pdev->device,
			 kfd->mec_fw_version,
			 kfd->device_info.no_atomic_fw_version);
		return false;
	}

	/* Verify module parameters regarding mapped process number */
	if ((hws_max_conc_proc < 0)
			|| (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
		dev_err(kfd_device,
			"hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
			hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
			kfd->vm_info.vmid_num_kfd);
		kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
	} else {
		kfd->max_proc_per_quantum = hws_max_conc_proc;
	}

	/* calculate max size of mqds needed for queues */
	size = max_num_of_queues_per_device *
			kfd->device_info.mqd_size_aligned;

	/*
	 * calculate max size of runlist packet.
	 * There can be only 2 packets at once
	 */
	map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ?
				sizeof(struct pm4_mes_map_process_aldebaran) :
				sizeof(struct pm4_mes_map_process);
	size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size +
		max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
		+ sizeof(struct pm4_mes_runlist)) * 2;

	/* Add size of HIQ & DIQ */
	size += KFD_KERNEL_QUEUE_SIZE * 2;

	/* add another 512KB for all other allocations on gart (HPD, fences) */
	size += 512 * 1024;

	if (amdgpu_amdkfd_alloc_gtt_mem(
			kfd->adev, size, &kfd->gtt_mem,
			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
			false)) {
		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
		goto alloc_gtt_mem_failure;
	}

	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);

	/* Initialize GTT sa with 512 byte chunk size */
	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
		goto kfd_gtt_sa_init_error;
	}

	if (kfd_doorbell_init(kfd)) {
		dev_err(kfd_device,
			"Error initializing doorbell aperture\n");
		goto kfd_doorbell_error;
	}

	kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;

	kfd->noretry = kfd->adev->gmc.noretry;

	if (kfd_interrupt_init(kfd)) {
		dev_err(kfd_device, "Error initializing interrupts\n");
		goto kfd_interrupt_error;
	}

	kfd->dqm = device_queue_manager_init(kfd);
	if (!kfd->dqm) {
		dev_err(kfd_device, "Error initializing queue manager\n");
		goto device_queue_manager_error;
	}

	/* If supported on this device, allocate global GWS that is shared
	 * by all KFD processes
	 */
	if (kfd_gws_init(kfd)) {
		dev_err(kfd_device, "Could not allocate %d gws\n",
			kfd->adev->gds.gws_size);
		goto gws_error;
	}

	/* If CRAT is broken, we won't enable IOMMU support */
	kfd_double_confirm_iommu_support(kfd);

	if (kfd_iommu_device_init(kfd)) {
		kfd->use_iommu_v2 = false;
		dev_err(kfd_device, "Error initializing iommuv2\n");
		goto device_iommu_error;
	}

	kfd_cwsr_init(kfd);

	svm_migrate_init(kfd->adev);

	if (kgd2kfd_resume_iommu(kfd))
		goto device_iommu_error;

	if (kfd_resume(kfd))
		goto kfd_resume_error;

	kfd->dbgmgr = NULL;

	if (kfd_topology_add_device(kfd)) {
		dev_err(kfd_device, "Error adding device to topology\n");
		goto kfd_topology_add_device_error;
	}

	kfd_smi_init(kfd);

	kfd->init_complete = true;
	dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
		 kfd->pdev->device);

	pr_debug("Starting kfd with the following scheduling policy %d\n",
		kfd->dqm->sched_policy);

	goto out;

kfd_topology_add_device_error:
kfd_resume_error:
device_iommu_error:
gws_error:
	device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
	kfd_interrupt_exit(kfd);
kfd_interrupt_error:
	kfd_doorbell_fini(kfd);
kfd_doorbell_error:
	kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
	amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
alloc_gtt_mem_failure:
	if (kfd->gws)
		amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
	dev_err(kfd_device,
		"device %x:%x NOT added due to errors\n",
		kfd->pdev->vendor, kfd->pdev->device);
out:
	return kfd->init_complete;
}

void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
	if (kfd->init_complete) {
		device_queue_manager_uninit(kfd->dqm);
		kfd_interrupt_exit(kfd);
		kfd_topology_remove_device(kfd);
		kfd_doorbell_fini(kfd);
		ida_destroy(&kfd->doorbell_ida);
		kfd_gtt_sa_fini(kfd);
		amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
		if (kfd->gws)
			amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
	}

	kfree(kfd);
}

int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return 0;

	kfd_smi_event_update_gpu_reset(kfd, false);

	kfd->dqm->ops.pre_reset(kfd->dqm);

	kgd2kfd_suspend(kfd, false);

	kfd_signal_reset_event(kfd);
	return 0;
}

/*
 * Fix me. KFD won't be able to resume existing processes for now.
 * We will keep all existing processes in an evicted state and
 * wait for them to be terminated.
 */

int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	int ret;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;
	atomic_dec(&kfd_locked);

	atomic_set(&kfd->sram_ecc_flag, 0);

	kfd_smi_event_update_gpu_reset(kfd, true);

	return 0;
}

bool kfd_is_locked(void)
{
	return (atomic_read(&kfd_locked) > 0);
}

void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
	if (!kfd->init_complete)
		return;

	/* for runtime suspend, skip locking kfd */
	if (!run_pm) {
		/* On the first KFD device suspend, suspend all KFD processes */
		if (atomic_inc_return(&kfd_locked) == 1)
			kfd_suspend_all_processes();
	}

	kfd->dqm->ops.stop(kfd->dqm);
	kfd_iommu_suspend(kfd);
}

int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	int ret, count;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;

	/* for runtime resume, skip unlocking kfd */
	if (!run_pm) {
		count = atomic_dec_return(&kfd_locked);
		WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
		if (count == 0)
			ret = kfd_resume_all_processes();
	}

	return ret;
}

int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
{
	int err = 0;

	err = kfd_iommu_resume(kfd);
	if (err)
		dev_err(kfd_device,
			"Failed to resume IOMMU for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
	return err;
}

static int kfd_resume(struct kfd_dev *kfd)
{
	int err = 0;

	err = kfd->dqm->ops.start(kfd->dqm);
	if (err)
		dev_err(kfd_device,
			"Error starting queue manager for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);

	return err;
}

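/*
 * Schedule interrupt work on a CPU in the current NUMA node so the
 * bottom-half handler stays local to the memory the top half touched.
 */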
static inline void kfd_queue_work(struct workqueue_struct *wq,
				  struct work_struct *work)
{
	int cpu, new_cpu;

	cpu = new_cpu = smp_processor_id();
	do {
		new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
		if (cpu_to_node(new_cpu) == numa_node_id())
			break;
	} while (cpu != new_cpu);

	queue_work_on(new_cpu, wq, work);
}

/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
	bool is_patched = false;
	unsigned long flags;

	if (!kfd->init_complete)
		return;

	if (kfd->device_info.ih_ring_entry_size > sizeof(patched_ihre)) {
		dev_err_once(kfd_device, "Ring entry too small\n");
		return;
	}

	spin_lock_irqsave(&kfd->interrupt_lock, flags);

	if (kfd->interrupts_active
	    && interrupt_is_wanted(kfd, ih_ring_entry,
				   patched_ihre, &is_patched)
	    && enqueue_ih_ring_entry(kfd,
				     is_patched ? patched_ihre : ih_ring_entry))
		kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work);

	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
}

int kgd2kfd_quiesce_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
	r = kfd_process_evict_queues(p);

	kfd_unref_process(p);
	return r;
}

int kgd2kfd_resume_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_restore_queues(p);

	kfd_unref_process(p);
	return r;
}

/**
 * kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
 *   prepare for safe eviction of KFD BOs that belong to the specified
 *   process.
 *
 * @mm: mm_struct that identifies the specified KFD process
 * @fence: eviction fence attached to KFD process BOs
 *
 */
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					       struct dma_fence *fence)
{
	struct kfd_process *p;
	unsigned long active_time;
	unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);

	if (!fence)
		return -EINVAL;

	if (dma_fence_is_signaled(fence))
		return 0;

	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ENODEV;

	if (fence->seqno == p->last_eviction_seqno)
		goto out;

	p->last_eviction_seqno = fence->seqno;

	/* Avoid KFD process starvation. Wait for at least
	 * PROCESS_ACTIVE_TIME_MS before evicting the process again
	 */
	active_time = get_jiffies_64() - p->last_restore_timestamp;
	if (delay_jiffies > active_time)
		delay_jiffies -= active_time;
	else
		delay_jiffies = 0;

	/* During process initialization eviction_work.dwork is initialized
	 * to kfd_evict_bo_worker
	 */
	WARN(debug_evictions, "Scheduling eviction of pid %d in %ld jiffies",
	     p->lead_thread->pid, delay_jiffies);
	schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
	kfd_unref_process(p);
	return 0;
}

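/*
 * The GTT sub-allocator below carves the device's GTT buffer into
 * fixed-size chunks and tracks them with a bitmap; kfd_gtt_sa_allocate
 * hands out runs of contiguous chunks and kfd_gtt_sa_free returns them.
 */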
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size)
{
	unsigned int num_of_longs;

	if (WARN_ON(buf_size < chunk_size))
		return -EINVAL;
	if (WARN_ON(buf_size == 0))
		return -EINVAL;
	if (WARN_ON(chunk_size == 0))
		return -EINVAL;

	kfd->gtt_sa_chunk_size = chunk_size;
	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

	num_of_longs = (kfd->gtt_sa_num_of_chunks + BITS_PER_LONG - 1) /
		BITS_PER_LONG;

	kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL);

	if (!kfd->gtt_sa_bitmap)
		return -ENOMEM;

	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

	mutex_init(&kfd->gtt_sa_lock);

	return 0;
}

static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
	mutex_destroy(&kfd->gtt_sa_lock);
	kfree(kfd->gtt_sa_bitmap);
}

static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}

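/*
 * First-fit search: scan the bitmap for a free chunk, then try to extend
 * the allocation with contiguous free chunks; on a gap, restart the scan
 * from the chunk where the run broke.
 */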
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj)
{
	unsigned int found, start_search, cur_size;

	if (size == 0)
		return -EINVAL;

	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
		return -ENOMEM;

	*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!(*mem_obj))
		return -ENOMEM;

	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);

	start_search = 0;

	mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
	/* Find the first chunk that is free */
	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks,
					start_search);

	pr_debug("Found = %d\n", found);

	/* If there wasn't any free chunk, bail out */
	if (found == kfd->gtt_sa_num_of_chunks)
		goto kfd_gtt_no_free_chunk;

	/* Update fields of mem_obj */
	(*mem_obj)->range_start = found;
	(*mem_obj)->range_end = found;
	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
					kfd->gtt_start_gpu_addr,
					found,
					kfd->gtt_sa_chunk_size);
	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
					kfd->gtt_start_cpu_ptr,
					found,
					kfd->gtt_sa_chunk_size);

	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

	/* If we need only one chunk, mark it as allocated and get out */
	if (size <= kfd->gtt_sa_chunk_size) {
		pr_debug("Single bit\n");
		set_bit(found, kfd->gtt_sa_bitmap);
		goto kfd_gtt_out;
	}

	/* Otherwise, try to see if we have enough contiguous chunks */
	cur_size = size - kfd->gtt_sa_chunk_size;
	do {
		(*mem_obj)->range_end =
			find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks, ++found);
		/*
		 * If the next free chunk is not contiguous, restart the
		 * search from the last free chunk we found (the one that
		 * wasn't contiguous with the previous ones).
		 */
		if ((*mem_obj)->range_end != found) {
			start_search = found;
			goto kfd_gtt_restart_search;
		}

		/*
		 * If we reached end of buffer, bail out with error
		 */
		if (found == kfd->gtt_sa_num_of_chunks)
			goto kfd_gtt_no_free_chunk;

		/* Check if we don't need another chunk */
		if (cur_size <= kfd->gtt_sa_chunk_size)
			cur_size = 0;
		else
			cur_size -= kfd->gtt_sa_chunk_size;

	} while (cur_size > 0);

	pr_debug("range_start = %d, range_end = %d\n",
		(*mem_obj)->range_start, (*mem_obj)->range_end);

	/* Mark the chunks as allocated */
	for (found = (*mem_obj)->range_start;
		found <= (*mem_obj)->range_end;
		found++)
		set_bit(found, kfd->gtt_sa_bitmap);

kfd_gtt_out:
	mutex_unlock(&kfd->gtt_sa_lock);
	return 0;

kfd_gtt_no_free_chunk:
	pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
	mutex_unlock(&kfd->gtt_sa_lock);
	kfree(*mem_obj);
	return -ENOMEM;
}

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
	unsigned int bit;

	/* Act like kfree when trying to free a NULL object */
	if (!mem_obj)
		return 0;

	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
			mem_obj, mem_obj->range_start, mem_obj->range_end);

	mutex_lock(&kfd->gtt_sa_lock);

	/* Mark the chunks as free */
	for (bit = mem_obj->range_start;
		bit <= mem_obj->range_end;
		bit++)
		clear_bit(bit, kfd->gtt_sa_bitmap);

	mutex_unlock(&kfd->gtt_sa_lock);

	kfree(mem_obj);
	return 0;
}

void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
	if (kfd)
		atomic_inc(&kfd->sram_ecc_flag);
}

void kfd_inc_compute_active(struct kfd_dev *kfd)
{
	if (atomic_inc_return(&kfd->compute_profile) == 1)
		amdgpu_amdkfd_set_compute_idle(kfd->adev, false);
}

void kfd_dec_compute_active(struct kfd_dev *kfd)
{
	int count = atomic_dec_return(&kfd->compute_profile);

	if (count == 0)
		amdgpu_amdkfd_set_compute_idle(kfd->adev, true);
	WARN_ONCE(count < 0, "Compute profile ref. count error");
}

void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
	if (kfd && kfd->init_complete)
		kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask);
}

/* kfd_get_num_sdma_engines returns the number of PCIe-optimized SDMA engines
 * and kfd_get_num_xgmi_sdma_engines returns the number of XGMI SDMA engines.
 * When the device has more than two engines, we reserve two for PCIe to
 * enable full-duplex and the rest are used as XGMI.
 */
unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev)
{
	/* If XGMI is not supported, all SDMA engines are PCIe */
	if (!kdev->adev->gmc.xgmi.supported)
		return kdev->adev->sdma.num_instances;

	return min(kdev->adev->sdma.num_instances, 2);
}

unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev)
{
	/* After reserving engines for PCIe, the rest are XGMI */
	return kdev->adev->sdma.num_instances - kfd_get_num_sdma_engines(kdev);
}

#if defined(CONFIG_DEBUG_FS)

/* This function will send a packet to HIQ to hang the HWS,
 * which will trigger a GPU reset and bring the HWS back to normal state
 */
int kfd_debugfs_hang_hws(struct kfd_dev *dev)
{
	if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
		pr_err("HWS is not enabled");
		return -EINVAL;
	}

	return dqm_debugfs_hang_hws(dev->dqm);
}

#endif