// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_headers_aldebaran.h"
#include "cwsr_trap_handler.h"
#include "kfd_iommu.h"
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
#include "kfd_migrate.h"
#include "amdgpu.h"

#define MQD_SIZE_ALIGNED 768

/*
 * kfd_locked is used to lock the KFD driver during suspend or reset.
 * Once locked, the KFD driver will stop any further GPU execution.
 * Process creation (open) will return -EAGAIN.
 */
static atomic_t kfd_locked = ATOMIC_INIT(0);

#ifdef CONFIG_DRM_AMDGPU_CIK
extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
#endif
extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
extern const struct kfd2kgd_calls arcturus_kfd2kgd;
extern const struct kfd2kgd_calls aldebaran_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v11_kfd2kgd;

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);

static int kfd_resume(struct kfd_dev *kfd);

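/*
 * Set the number of usable and reserved SDMA queues per engine from the
 * device's SDMA IP version. Unrecognized versions fall back to 8 queues
 * per engine with a warning.
 */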
static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
{
	uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0];

	switch (sdma_version) {
	case IP_VERSION(4, 0, 0):/* VEGA10 */
	case IP_VERSION(4, 0, 1):/* VEGA12 */
	case IP_VERSION(4, 1, 0):/* RAVEN */
	case IP_VERSION(4, 1, 1):/* RAVEN */
	case IP_VERSION(4, 1, 2):/* RENOIR */
	case IP_VERSION(5, 2, 1):/* VANGOGH */
	case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
	case IP_VERSION(5, 2, 6):/* GC 10.3.6 */
	case IP_VERSION(5, 2, 7):/* GC 10.3.7 */
	case IP_VERSION(6, 0, 1):
		kfd->device_info.num_sdma_queues_per_engine = 2;
		break;
	case IP_VERSION(4, 2, 0):/* VEGA20 */
	case IP_VERSION(4, 2, 2):/* ARCTURUS */
	case IP_VERSION(4, 4, 0):/* ALDEBARAN */
	case IP_VERSION(5, 0, 0):/* NAVI10 */
	case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
	case IP_VERSION(5, 0, 2):/* NAVI14 */
	case IP_VERSION(5, 0, 5):/* NAVI12 */
	case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */
	case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */
	case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */
	case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 2):
		kfd->device_info.num_sdma_queues_per_engine = 8;
		break;
	default:
		dev_warn(kfd_device,
			"Defaulting to 8 SDMA queues per engine for unrecognized SDMA IP version (SDMA_HWIP: 0x%x)\n",
			sdma_version);
		kfd->device_info.num_sdma_queues_per_engine = 8;
	}

	switch (sdma_version) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
		/* Reserve 1 for paging and 1 for gfx */
		kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
		/* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
		kfd->device_info.reserved_sdma_queues_bitmap = 0xFULL;
		break;
	default:
		break;
	}
}

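/*
 * Select the event interrupt handler class from the device's GC IP
 * version. Unrecognized versions fall back to the v9 handler with a
 * warning.
 */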
static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
{
	uint32_t gc_version = KFD_GC_VERSION(kfd);

	switch (gc_version) {
	case IP_VERSION(9, 0, 1): /* VEGA10 */
	case IP_VERSION(9, 1, 0): /* RAVEN */
	case IP_VERSION(9, 2, 1): /* VEGA12 */
	case IP_VERSION(9, 2, 2): /* RAVEN */
	case IP_VERSION(9, 3, 0): /* RENOIR */
	case IP_VERSION(9, 4, 0): /* VEGA20 */
	case IP_VERSION(9, 4, 1): /* ARCTURUS */
	case IP_VERSION(9, 4, 2): /* ALDEBARAN */
	case IP_VERSION(10, 3, 1): /* VANGOGH */
	case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
	case IP_VERSION(10, 3, 6): /* GC 10.3.6 */
	case IP_VERSION(10, 3, 7): /* GC 10.3.7 */
	case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 1, 10): /* NAVI10 */
	case IP_VERSION(10, 1, 2): /* NAVI12 */
	case IP_VERSION(10, 1, 1): /* NAVI14 */
	case IP_VERSION(10, 3, 0): /* SIENNA_CICHLID */
	case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */
	case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */
	case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
		break;
	default:
		dev_warn(kfd_device,
			"Defaulting to the v9 event interrupt handler for unrecognized GC IP version (GC_HWIP: 0x%x)\n",
			gc_version);
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
	}
}

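/*
 * Fill in kfd->device_info: doorbell and IH ring entry sizes, CWSR
 * support, SDMA queue counts, interrupt class, and the IOMMU and PCIe
 * atomics requirements. SOC15+ parts are keyed by GC IP version, older
 * parts by asic_type.
 */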
static void kfd_device_info_init(struct kfd_dev *kfd,
				 bool vf, uint32_t gfx_target_version)
{
	uint32_t gc_version = KFD_GC_VERSION(kfd);
	uint32_t asic_type = kfd->adev->asic_type;

	kfd->device_info.max_pasid_bits = 16;
	kfd->device_info.max_no_of_hqd = 24;
	kfd->device_info.num_of_watch_points = 4;
	kfd->device_info.mqd_size_aligned = MQD_SIZE_ALIGNED;
	kfd->device_info.gfx_target_version = gfx_target_version;

	if (KFD_IS_SOC15(kfd)) {
		kfd->device_info.doorbell_size = 8;
		kfd->device_info.ih_ring_entry_size = 8 * sizeof(uint32_t);
		kfd->device_info.supports_cwsr = true;

		kfd_device_info_set_sdma_info(kfd);

		kfd_device_info_set_event_interrupt_class(kfd);

		/* Raven */
		if (gc_version == IP_VERSION(9, 1, 0) ||
		    gc_version == IP_VERSION(9, 2, 2))
			kfd->device_info.needs_iommu_device = true;

		if (gc_version < IP_VERSION(11, 0, 0)) {
			/* Navi2x+, Navi1x+ */
			if (gc_version == IP_VERSION(10, 3, 6))
				kfd->device_info.no_atomic_fw_version = 14;
			else if (gc_version == IP_VERSION(10, 3, 7))
				kfd->device_info.no_atomic_fw_version = 3;
			else if (gc_version >= IP_VERSION(10, 3, 0))
				kfd->device_info.no_atomic_fw_version = 92;
			else if (gc_version >= IP_VERSION(10, 1, 1))
				kfd->device_info.no_atomic_fw_version = 145;

			/* Navi1x+ */
			if (gc_version >= IP_VERSION(10, 1, 1))
				kfd->device_info.needs_pci_atomics = true;
		}
	} else {
		kfd->device_info.doorbell_size = 4;
		kfd->device_info.ih_ring_entry_size = 4 * sizeof(uint32_t);
		kfd->device_info.event_interrupt_class = &event_interrupt_class_cik;
		kfd->device_info.num_sdma_queues_per_engine = 2;

		if (asic_type != CHIP_KAVERI &&
		    asic_type != CHIP_HAWAII &&
		    asic_type != CHIP_TONGA)
			kfd->device_info.supports_cwsr = true;

		if (asic_type == CHIP_KAVERI ||
		    asic_type == CHIP_CARRIZO)
			kfd->device_info.needs_iommu_device = true;

		if (asic_type != CHIP_HAWAII && !vf)
			kfd->device_info.needs_pci_atomics = true;
	}
}

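/*
 * Match the device to a kfd2kgd function table and a gfx target version.
 * Returns a newly allocated kfd_dev on success, or NULL if the ASIC or
 * GC IP version is not supported by KFD (or the allocation fails).
 */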
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
{
	struct kfd_dev *kfd = NULL;
	const struct kfd2kgd_calls *f2g = NULL;
	struct pci_dev *pdev = adev->pdev;
	uint32_t gfx_target_version = 0;

	switch (adev->asic_type) {
#ifdef KFD_SUPPORT_IOMMU_V2
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
		gfx_target_version = 70000;
		if (!vf)
			f2g = &gfx_v7_kfd2kgd;
		break;
#endif
	case CHIP_CARRIZO:
		gfx_target_version = 80001;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_HAWAII:
		gfx_target_version = 70001;
		if (!amdgpu_exp_hw_support)
			pr_info("KFD support on Hawaii is experimental. See modparam exp_hw_support\n");
		else if (!vf)
			f2g = &gfx_v7_kfd2kgd;
		break;
#endif
	case CHIP_TONGA:
		gfx_target_version = 80002;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_FIJI:
		gfx_target_version = 80003;
		f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_POLARIS10:
		gfx_target_version = 80003;
		f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_POLARIS11:
		gfx_target_version = 80003;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_POLARIS12:
		gfx_target_version = 80003;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_VEGAM:
		gfx_target_version = 80003;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
	default:
		switch (adev->ip_versions[GC_HWIP][0]) {
		/* Vega 10 */
		case IP_VERSION(9, 0, 1):
			gfx_target_version = 90000;
			f2g = &gfx_v9_kfd2kgd;
			break;
#ifdef KFD_SUPPORT_IOMMU_V2
		/* Raven */
		case IP_VERSION(9, 1, 0):
		case IP_VERSION(9, 2, 2):
			gfx_target_version = 90002;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
#endif
		/* Vega12 */
		case IP_VERSION(9, 2, 1):
			gfx_target_version = 90004;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
		/* Renoir */
		case IP_VERSION(9, 3, 0):
			gfx_target_version = 90012;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
		/* Vega20 */
		case IP_VERSION(9, 4, 0):
			gfx_target_version = 90006;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
		/* Arcturus */
		case IP_VERSION(9, 4, 1):
			gfx_target_version = 90008;
			f2g = &arcturus_kfd2kgd;
			break;
		/* Aldebaran */
		case IP_VERSION(9, 4, 2):
			gfx_target_version = 90010;
			f2g = &aldebaran_kfd2kgd;
			break;
		/* Navi10 */
		case IP_VERSION(10, 1, 10):
			gfx_target_version = 100100;
			if (!vf)
				f2g = &gfx_v10_kfd2kgd;
			break;
		/* Navi12 */
		case IP_VERSION(10, 1, 2):
			gfx_target_version = 100101;
			f2g = &gfx_v10_kfd2kgd;
			break;
		/* Navi14 */
		case IP_VERSION(10, 1, 1):
			gfx_target_version = 100102;
			if (!vf)
				f2g = &gfx_v10_kfd2kgd;
			break;
		/* Cyan Skillfish */
		case IP_VERSION(10, 1, 3):
		case IP_VERSION(10, 1, 4):
			gfx_target_version = 100103;
			if (!vf)
				f2g = &gfx_v10_kfd2kgd;
			break;
		/* Sienna Cichlid */
		case IP_VERSION(10, 3, 0):
			gfx_target_version = 100300;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Navy Flounder */
		case IP_VERSION(10, 3, 2):
			gfx_target_version = 100301;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Van Gogh */
		case IP_VERSION(10, 3, 1):
			gfx_target_version = 100303;
			if (!vf)
				f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Dimgrey Cavefish */
		case IP_VERSION(10, 3, 4):
			gfx_target_version = 100302;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Beige Goby */
		case IP_VERSION(10, 3, 5):
			gfx_target_version = 100304;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Yellow Carp */
		case IP_VERSION(10, 3, 3):
			gfx_target_version = 100305;
			if (!vf)
				f2g = &gfx_v10_3_kfd2kgd;
			break;
		case IP_VERSION(10, 3, 6):
			gfx_target_version = 100306;
			if (!vf)
				f2g = &gfx_v10_3_kfd2kgd;
			break;
		case IP_VERSION(10, 3, 7):
			gfx_target_version = 100307;
			if (!vf)
				f2g = &gfx_v10_3_kfd2kgd;
			break;
		case IP_VERSION(11, 0, 0):
			gfx_target_version = 110000;
			f2g = &gfx_v11_kfd2kgd;
			break;
		case IP_VERSION(11, 0, 1):
			gfx_target_version = 110003;
			f2g = &gfx_v11_kfd2kgd;
			break;
		case IP_VERSION(11, 0, 2):
			gfx_target_version = 110002;
			f2g = &gfx_v11_kfd2kgd;
			break;
		default:
			break;
		}
		break;
	}

	if (!f2g) {
		if (adev->ip_versions[GC_HWIP][0])
			dev_err(kfd_device, "GC IP %06x %s not supported in kfd\n",
				adev->ip_versions[GC_HWIP][0], vf ? "VF" : "");
		else
			dev_err(kfd_device, "%s %s not supported in kfd\n",
				amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
		return NULL;
	}

	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
	if (!kfd)
		return NULL;

	kfd->adev = adev;
	kfd_device_info_init(kfd, vf, gfx_target_version);
	kfd->pdev = pdev;
	kfd->init_complete = false;
	kfd->kfd2kgd = f2g;
	atomic_set(&kfd->compute_profile, 0);

	mutex_init(&kfd->doorbell_mutex);
	memset(&kfd->doorbell_available_index, 0,
		sizeof(kfd->doorbell_available_index));

	atomic_set(&kfd->sram_ecc_flag, 0);

	ida_init(&kfd->doorbell_ida);

	return kfd;
}

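/*
 * Select the CWSR (compute wave save/restore) trap handler image that
 * matches the device's GC IP version. Each image must fit in one page;
 * the BUILD_BUG_ON checks enforce this at compile time.
 */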
static void kfd_cwsr_init(struct kfd_dev *kfd)
{
	if (cwsr_enable && kfd->device_info.supports_cwsr) {
		if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx8_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_arcturus_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_aldebaran_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex);
		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx9_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_nv1x_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx10_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex);
		} else {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx11_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx11_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex);
		}

		kfd->cwsr_enabled = true;
	}
}

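/*
 * Allocate global GWS (global wave sync) resources when the scheduler
 * supports them. The MEC2 firmware version checks below are presumably
 * the minimum firmware versions with GWS support on each GC IP version.
 */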
static int kfd_gws_init(struct kfd_dev *kfd)
{
	int ret = 0;

	if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
		return 0;

	if (hws_gws_support || (KFD_IS_SOC15(kfd) &&
		((KFD_GC_VERSION(kfd) == IP_VERSION(9, 0, 1)
			&& kfd->mec2_fw_version >= 0x81b3) ||
		(KFD_GC_VERSION(kfd) <= IP_VERSION(9, 4, 0)
			&& kfd->mec2_fw_version >= 0x1b3)  ||
		(KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)
			&& kfd->mec2_fw_version >= 0x30)   ||
		(KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)
			&& kfd->mec2_fw_version >= 0x28))))
		ret = amdgpu_amdkfd_alloc_gws(kfd->adev,
				kfd->adev->gds.gws_size, &kfd->gws);

	return ret;
}

static void kfd_smi_init(struct kfd_dev *dev)
{
	INIT_LIST_HEAD(&dev->smi_clients);
	spin_lock_init(&dev->smi_lock);
}

bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 struct drm_device *ddev,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	unsigned int size, map_process_packet_size;

	kfd->ddev = ddev;
	kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_MEC1);
	kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_MEC2);
	kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_SDMA1);
	kfd->shared_resources = *gpu_resources;

	kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
			- kfd->vm_info.first_vmid_kfd + 1;

	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
	 * 32- and 64-bit requests are possible and must be
	 * supported.
	 */
	kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev);
	if (!kfd->pci_atomic_requested &&
	    kfd->device_info.needs_pci_atomics &&
	    (!kfd->device_info.no_atomic_fw_version ||
	     kfd->mec_fw_version < kfd->device_info.no_atomic_fw_version)) {
		dev_info(kfd_device,
			 "skipped device %x:%x, PCI rejects atomics %d<%d\n",
			 kfd->pdev->vendor, kfd->pdev->device,
			 kfd->mec_fw_version,
			 kfd->device_info.no_atomic_fw_version);
		return false;
	}

	/* Verify the module parameter that limits the number of mapped processes */
	if (hws_max_conc_proc >= 0)
		kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd);
	else
		kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;

	/* Calculate the max size of MQDs needed for queues */
	size = max_num_of_queues_per_device *
			kfd->device_info.mqd_size_aligned;

	/*
	 * Calculate the max size of the runlist packet.
	 * There can be only 2 packets in flight at once.
	 */
	map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ?
				sizeof(struct pm4_mes_map_process_aldebaran) :
				sizeof(struct pm4_mes_map_process);
	size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size +
		max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
		+ sizeof(struct pm4_mes_runlist)) * 2;

	/* Add size of HIQ & DIQ */
	size += KFD_KERNEL_QUEUE_SIZE * 2;

	/* add another 512KB for all other allocations on gart (HPD, fences) */
	size += 512 * 1024;

	if (amdgpu_amdkfd_alloc_gtt_mem(
			kfd->adev, size, &kfd->gtt_mem,
			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
			false)) {
		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
		goto alloc_gtt_mem_failure;
	}

	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);

	/* Initialize GTT sa with 512 byte chunk size */
	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
		goto kfd_gtt_sa_init_error;
	}

	if (kfd_doorbell_init(kfd)) {
		dev_err(kfd_device,
			"Error initializing doorbell aperture\n");
		goto kfd_doorbell_error;
	}

	if (amdgpu_use_xgmi_p2p)
		kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;

	kfd->noretry = kfd->adev->gmc.noretry;

	if (kfd_interrupt_init(kfd)) {
		dev_err(kfd_device, "Error initializing interrupts\n");
		goto kfd_interrupt_error;
	}

	kfd->dqm = device_queue_manager_init(kfd);
	if (!kfd->dqm) {
		dev_err(kfd_device, "Error initializing queue manager\n");
		goto device_queue_manager_error;
	}

	/* If supported on this device, allocate global GWS that is shared
	 * by all KFD processes
	 */
	if (kfd_gws_init(kfd)) {
		dev_err(kfd_device, "Could not allocate %d gws\n",
			kfd->adev->gds.gws_size);
		goto gws_error;
	}

	/* If the CRAT is broken, IOMMU support will not be enabled */
	kfd_double_confirm_iommu_support(kfd);

	if (kfd_iommu_device_init(kfd)) {
		kfd->use_iommu_v2 = false;
		dev_err(kfd_device, "Error initializing iommuv2\n");
		goto device_iommu_error;
	}

	kfd_cwsr_init(kfd);

	svm_migrate_init(kfd->adev);

	if (kgd2kfd_resume_iommu(kfd))
		goto device_iommu_error;

	if (kfd_resume(kfd))
		goto kfd_resume_error;

	amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info);

	if (kfd_topology_add_device(kfd)) {
		dev_err(kfd_device, "Error adding device to topology\n");
		goto kfd_topology_add_device_error;
	}

	kfd_smi_init(kfd);

	kfd->init_complete = true;
	dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
		 kfd->pdev->device);

	pr_debug("Starting kfd with the following scheduling policy %d\n",
		kfd->dqm->sched_policy);

	goto out;

kfd_topology_add_device_error:
kfd_resume_error:
device_iommu_error:
gws_error:
	device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
	kfd_interrupt_exit(kfd);
kfd_interrupt_error:
	kfd_doorbell_fini(kfd);
kfd_doorbell_error:
	kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
	amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
alloc_gtt_mem_failure:
	if (kfd->gws)
		amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
	dev_err(kfd_device,
		"device %x:%x NOT added due to errors\n",
		kfd->pdev->vendor, kfd->pdev->device);
out:
	return kfd->init_complete;
}

void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
	if (kfd->init_complete) {
		device_queue_manager_uninit(kfd->dqm);
		kfd_interrupt_exit(kfd);
		kfd_topology_remove_device(kfd);
		kfd_doorbell_fini(kfd);
		ida_destroy(&kfd->doorbell_ida);
		kfd_gtt_sa_fini(kfd);
		amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
		if (kfd->gws)
			amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
	}

	kfree(kfd);
}

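/*
 * Prepare KFD for a GPU reset: stop the queue manager, suspend all KFD
 * processes, and signal reset events to user mode. kgd2kfd_post_reset()
 * resumes the device once the reset has completed.
 */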
int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return 0;

	kfd_smi_event_update_gpu_reset(kfd, false);

	kfd->dqm->ops.pre_reset(kfd->dqm);

	kgd2kfd_suspend(kfd, false);

	kfd_signal_reset_event(kfd);
	return 0;
}

/*
 * FIXME: KFD is not able to resume existing processes for now.
 * We keep all existing processes in an evicted state and wait for
 * them to terminate.
 */

int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	int ret;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;
	atomic_dec(&kfd_locked);

	atomic_set(&kfd->sram_ecc_flag, 0);

	kfd_smi_event_update_gpu_reset(kfd, true);

	return 0;
}

bool kfd_is_locked(void)
{
	return (atomic_read(&kfd_locked) > 0);
}

void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
	if (!kfd->init_complete)
		return;

	/* For runtime suspend, skip locking KFD */
	if (!run_pm) {
		/* When the first KFD device suspends, suspend all KFD processes */
		if (atomic_inc_return(&kfd_locked) == 1)
			kfd_suspend_all_processes();
	}

	kfd->dqm->ops.stop(kfd->dqm);
	kfd_iommu_suspend(kfd);
}

int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	int ret, count;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;

	/* For runtime resume, skip unlocking KFD */
	if (!run_pm) {
		count = atomic_dec_return(&kfd_locked);
		WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
		if (count == 0)
			ret = kfd_resume_all_processes();
	}

	return ret;
}

int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
{
	int err = 0;

	err = kfd_iommu_resume(kfd);
	if (err)
		dev_err(kfd_device,
			"Failed to resume IOMMU for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
	return err;
}

static int kfd_resume(struct kfd_dev *kfd)
{
	int err = 0;

	err = kfd->dqm->ops.start(kfd->dqm);
	if (err)
		dev_err(kfd_device,
			"Error starting queue manager for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);

	return err;
}

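/*
 * Queue work on a CPU in the current NUMA node, spreading interrupt
 * bottom-half work across CPUs while keeping it cache-close to the ISR
 * that queued it.
 */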
static inline void kfd_queue_work(struct workqueue_struct *wq,
				  struct work_struct *work)
{
	int cpu, new_cpu;

	cpu = new_cpu = smp_processor_id();
	do {
		new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
		if (cpu_to_node(new_cpu) == numa_node_id())
			break;
	} while (cpu != new_cpu);

	queue_work_on(new_cpu, wq, work);
}

/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
	bool is_patched = false;
	unsigned long flags;

	if (!kfd->init_complete)
		return;

	if (kfd->device_info.ih_ring_entry_size > sizeof(patched_ihre)) {
		dev_err_once(kfd_device, "Ring entry too small\n");
		return;
	}

	spin_lock_irqsave(&kfd->interrupt_lock, flags);

	if (kfd->interrupts_active
	    && interrupt_is_wanted(kfd, ih_ring_entry,
				   patched_ihre, &is_patched)
	    && enqueue_ih_ring_entry(kfd,
				     is_patched ? patched_ihre : ih_ring_entry))
		kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work);

	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
}

int kgd2kfd_quiesce_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
	r = kfd_process_evict_queues(p);

	kfd_unref_process(p);
	return r;
}

int kgd2kfd_resume_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_restore_queues(p);

	kfd_unref_process(p);
	return r;
}

/**
 * kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
 *   prepare for safe eviction of KFD BOs that belong to the specified
 *   process.
 *
 * @mm: mm_struct that identifies the specified KFD process
 * @fence: eviction fence attached to KFD process BOs
 *
 */
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					       struct dma_fence *fence)
{
	struct kfd_process *p;
	unsigned long active_time;
	unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);

	if (!fence)
		return -EINVAL;

	if (dma_fence_is_signaled(fence))
		return 0;

	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ENODEV;

	if (fence->seqno == p->last_eviction_seqno)
		goto out;

	p->last_eviction_seqno = fence->seqno;

	/* Avoid KFD process starvation. Wait for at least
	 * PROCESS_ACTIVE_TIME_MS before evicting the process again
	 */
	active_time = get_jiffies_64() - p->last_restore_timestamp;
	if (delay_jiffies > active_time)
		delay_jiffies -= active_time;
	else
		delay_jiffies = 0;

	/* During process initialization eviction_work.dwork is initialized
	 * to kfd_evict_bo_worker
	 */
	WARN(debug_evictions, "Scheduling eviction of pid %d in %ld jiffies",
	     p->lead_thread->pid, delay_jiffies);
	schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
	kfd_unref_process(p);
	return 0;
}

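/*
 * GTT sub-allocator: carves the single GTT buffer allocated at device
 * init into fixed-size chunks tracked in a bitmap, for small allocations
 * such as MQDs and runlist packets.
 */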
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size)
{
	if (WARN_ON(buf_size < chunk_size))
		return -EINVAL;
	if (WARN_ON(buf_size == 0))
		return -EINVAL;
	if (WARN_ON(chunk_size == 0))
		return -EINVAL;

	kfd->gtt_sa_chunk_size = chunk_size;
	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

	kfd->gtt_sa_bitmap = bitmap_zalloc(kfd->gtt_sa_num_of_chunks,
					   GFP_KERNEL);
	if (!kfd->gtt_sa_bitmap)
		return -ENOMEM;

	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

	mutex_init(&kfd->gtt_sa_lock);

	return 0;
}

static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
	mutex_destroy(&kfd->gtt_sa_lock);
	bitmap_free(kfd->gtt_sa_bitmap);
}

static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}

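/*
 * First-fit search over the chunk bitmap: find a free chunk, then extend
 * the range until enough contiguous chunks cover the requested size,
 * restarting the search past any gap that is hit along the way.
 */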
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj)
{
	unsigned int found, start_search, cur_size;

	if (size == 0)
		return -EINVAL;

	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
		return -ENOMEM;

	*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!(*mem_obj))
		return -ENOMEM;

	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);

	start_search = 0;

	mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
	/* Find the first chunk that is free */
	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks,
					start_search);

	pr_debug("Found = %d\n", found);

	/* If there wasn't any free chunk, bail out */
	if (found == kfd->gtt_sa_num_of_chunks)
		goto kfd_gtt_no_free_chunk;

	/* Update fields of mem_obj */
	(*mem_obj)->range_start = found;
	(*mem_obj)->range_end = found;
	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
					kfd->gtt_start_gpu_addr,
					found,
					kfd->gtt_sa_chunk_size);
	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
					kfd->gtt_start_cpu_ptr,
					found,
					kfd->gtt_sa_chunk_size);

	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

	/* If we need only one chunk, mark it as allocated and get out */
	if (size <= kfd->gtt_sa_chunk_size) {
		pr_debug("Single bit\n");
		__set_bit(found, kfd->gtt_sa_bitmap);
		goto kfd_gtt_out;
	}

	/* Otherwise, try to see if we have enough contiguous chunks */
	cur_size = size - kfd->gtt_sa_chunk_size;
	do {
		(*mem_obj)->range_end =
			find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks, ++found);
		/*
		 * If the next free chunk is not contiguous, then we need to
		 * restart our search from the last free chunk we found (which
		 * wasn't contiguous to the previous ones).
		 */
		if ((*mem_obj)->range_end != found) {
			start_search = found;
			goto kfd_gtt_restart_search;
		}

		/*
		 * If we reached end of buffer, bail out with error
		 */
		if (found == kfd->gtt_sa_num_of_chunks)
			goto kfd_gtt_no_free_chunk;

		/* Check if we don't need another chunk */
		if (cur_size <= kfd->gtt_sa_chunk_size)
			cur_size = 0;
		else
			cur_size -= kfd->gtt_sa_chunk_size;

	} while (cur_size > 0);

	pr_debug("range_start = %d, range_end = %d\n",
		(*mem_obj)->range_start, (*mem_obj)->range_end);

	/* Mark the chunks as allocated */
	bitmap_set(kfd->gtt_sa_bitmap, (*mem_obj)->range_start,
		   (*mem_obj)->range_end - (*mem_obj)->range_start + 1);

kfd_gtt_out:
	mutex_unlock(&kfd->gtt_sa_lock);
	return 0;

kfd_gtt_no_free_chunk:
	pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
	mutex_unlock(&kfd->gtt_sa_lock);
	kfree(*mem_obj);
	return -ENOMEM;
}

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
	/* Act like kfree when trying to free a NULL object */
	if (!mem_obj)
		return 0;

	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
			mem_obj, mem_obj->range_start, mem_obj->range_end);

	mutex_lock(&kfd->gtt_sa_lock);

	/* Mark the chunks as free */
	bitmap_clear(kfd->gtt_sa_bitmap, mem_obj->range_start,
		     mem_obj->range_end - mem_obj->range_start + 1);

	mutex_unlock(&kfd->gtt_sa_lock);

	kfree(mem_obj);
	return 0;
}

void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
	if (kfd)
		atomic_inc(&kfd->sram_ecc_flag);
}

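/*
 * Track whether any compute queues are active on the device so power
 * management can switch between the compute and idle profiles.
 */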
void kfd_inc_compute_active(struct kfd_dev *kfd)
{
	if (atomic_inc_return(&kfd->compute_profile) == 1)
		amdgpu_amdkfd_set_compute_idle(kfd->adev, false);
}

void kfd_dec_compute_active(struct kfd_dev *kfd)
{
	int count = atomic_dec_return(&kfd->compute_profile);

	if (count == 0)
		amdgpu_amdkfd_set_compute_idle(kfd->adev, true);
	WARN_ONCE(count < 0, "Compute profile ref. count error");
}

void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
	if (kfd && kfd->init_complete)
		kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask);
}

/* kfd_get_num_sdma_engines returns the number of PCIe-optimized SDMA engines
 * and kfd_get_num_xgmi_sdma_engines returns the number of XGMI-dedicated SDMA
 * engines. When the device has more than two engines, we reserve two for PCIe
 * to enable full-duplex transfers and use the rest for XGMI.
 */
unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev)
{
	/* If XGMI is not supported, all SDMA engines are PCIe */
	if (!kdev->adev->gmc.xgmi.supported)
		return kdev->adev->sdma.num_instances;

	return min(kdev->adev->sdma.num_instances, 2);
}

unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev)
{
	/* The engines left after the PCIe reservation are XGMI */
	return kdev->adev->sdma.num_instances - kfd_get_num_sdma_engines(kdev);
}

#if defined(CONFIG_DEBUG_FS)

/* This function sends a packet to the HIQ to hang the HWS, which will
 * trigger a GPU reset and bring the HWS back to a normal state.
 */
int kfd_debugfs_hang_hws(struct kfd_dev *dev)
{
	if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
		pr_err("HWS is not enabled");
		return -EINVAL;
	}

	return dqm_debugfs_hang_hws(dev->dqm);
}

#endif
