/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_shared.h"
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include <linux/module.h>
#include <linux/dma-buf.h>

const struct kgd2kfd_calls *kgd2kfd;

static const unsigned int compute_vmid_bitmap = 0xFF00;

/* Total memory size in system memory and all GPU VRAM. Used to
 * estimate worst case amount of memory to reserve for page tables
 */
uint64_t amdgpu_amdkfd_total_mem_size;

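/* Record the total system memory size and set up the kgd2kfd interface to
 * the KFD module. Returns -ENOENT when KFD support (CONFIG_HSA_AMD) is not
 * compiled in.
 */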
int amdgpu_amdkfd_init(void)
{
	struct sysinfo si;
	int ret;

	si_meminfo(&si);
	amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
	amdgpu_amdkfd_total_mem_size *= si.mem_unit;

#ifdef CONFIG_HSA_AMD
	ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret)
		kgd2kfd = NULL;
	amdgpu_amdkfd_gpuvm_init_mem_limits();
#else
	kgd2kfd = NULL;
	ret = -ENOENT;
#endif

	return ret;
}

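/* Tear down the kgd2kfd interface on driver unload. */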
void amdgpu_amdkfd_fini(void)
{
	if (kgd2kfd)
		kgd2kfd->exit();
}

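/* Select the ASIC-specific kfd2kgd function table and probe a KFD device
 * for this adapter. ASICs without KFD support are skipped with an info
 * message.
 */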
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	const struct kfd2kgd_calls *kfd2kgd;

	if (!kgd2kfd)
		return;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
		break;
#endif
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
		kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
		break;
	default:
		dev_info(adev->dev, "kfd not supported on this ASIC\n");
		return;
	}

	adev->kfd.dev = kgd2kfd->probe((struct kgd_dev *)adev,
				       adev->pdev, kfd2kgd);

	if (adev->kfd.dev)
		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                set up amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning number of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
					 phys_addr_t *aperture_base,
					 size_t *aperture_size,
					 size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

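/* Report the resources amdgpu shares with KFD (compute VMIDs, MEC queues,
 * doorbells and the GPUVM aperture) and initialize the KFD device.
 */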
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i, n;
	int last_valid_bit;

	if (adev->kfd.dev) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap = compute_vmid_bitmap,
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
			.gpuvm_size = min(adev->vm_manager.max_pfn
					  << AMDGPU_GPU_PAGE_SHIFT,
					  AMDGPU_GMC_HOLE_START),
			.drm_render_minor = adev->ddev->render->index
		};

		/* this is going to have a few of the MSBs set that we need to
		 * clear */
		bitmap_complement(gpu_resources.queue_bitmap,
				  adev->gfx.mec.queue_bitmap,
				  KGD_MAX_QUEUES);

		/* remove the KIQ bit as well */
		if (adev->gfx.kiq.ring.sched.ready)
			clear_bit(amdgpu_gfx_queue_to_bit(adev,
							  adev->gfx.kiq.ring.me - 1,
							  adev->gfx.kiq.ring.pipe,
							  adev->gfx.kiq.ring.queue),
				  gpu_resources.queue_bitmap);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear()
		 * if nbits is not a compile-time constant */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				* adev->gfx.mec.num_pipe_per_mec
				* adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.queue_bitmap);

		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		if (adev->asic_type < CHIP_VEGA10) {
			kgd2kfd->device_init(adev->kfd.dev, &gpu_resources);
			return;
		}

		n = (adev->asic_type < CHIP_VEGA20) ? 2 : 8;

		for (i = 0; i < n; i += 2) {
			/* On SOC15 the BIF is involved in routing
			 * doorbells using the low 12 bits of the
			 * address. Communicate the assignments to
			 * KFD. KFD uses two doorbell pages per
			 * process in case of 64-bit doorbells so we
			 * can use each doorbell assignment twice.
			 */
			gpu_resources.sdma_doorbell[0][i] =
				adev->doorbell_index.sdma_engine0 + (i >> 1);
			gpu_resources.sdma_doorbell[0][i+1] =
				adev->doorbell_index.sdma_engine0 + 0x200 + (i >> 1);
			gpu_resources.sdma_doorbell[1][i] =
				adev->doorbell_index.sdma_engine1 + (i >> 1);
			gpu_resources.sdma_doorbell[1][i+1] =
				adev->doorbell_index.sdma_engine1 + 0x200 + (i >> 1);
		}
		/* Doorbells 0x0e0-0x0ff and 0x2e0-0x2ff are reserved for
		 * SDMA, IH and VCN. So don't use them for the CP.
		 */
		gpu_resources.reserved_doorbell_mask = 0x1e0;
		gpu_resources.reserved_doorbell_val  = 0x0e0;

		kgd2kfd->device_init(adev->kfd.dev, &gpu_resources);
	}
}

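/* Tear down the KFD device for this adapter. */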
void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
	if (adev->kfd.dev) {
		kgd2kfd->device_exit(adev->kfd.dev);
		adev->kfd.dev = NULL;
	}
}

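/* Forward an interrupt ring entry to KFD for processing. */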
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
		const void *ih_ring_entry)
{
	if (adev->kfd.dev)
		kgd2kfd->interrupt(adev->kfd.dev, ih_ring_entry);
}

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
{
	if (adev->kfd.dev)
		kgd2kfd->suspend(adev->kfd.dev);
}

int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd->resume(adev->kfd.dev);

	return r;
}

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd->pre_reset(adev->kfd.dev);

	return r;
}

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd->post_reset(adev->kfd.dev);

	return r;
}

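/* Request GPU recovery on behalf of KFD. */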
void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (amdgpu_device_should_recover_gpu(adev))
		amdgpu_device_gpu_recover(adev, NULL);
}

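/* Allocate, pin and kernel-map a GTT buffer object for KFD kernel use
 * (e.g. MQDs) and return its GPU and CPU addresses in *gpu_addr and
 * *cpu_ptr.
 */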
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool mqd_gfx9)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;
	void *cpu_ptr_tmp = NULL;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (mqd_gfx9)
		bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto allocate_mem_kmap_bo_failed;
	}

	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}

	*mem_obj = bo;
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	*cpu_ptr = cpu_ptr_tmp;

	amdgpu_bo_unreserve(bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&bo);

	return r;
}

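/* Unmap, unpin and free a buffer allocated with amdgpu_amdkfd_alloc_gtt_mem(). */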
void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&(bo));
}

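/* Report the sizes of CPU-visible (public) and CPU-invisible (private) VRAM,
 * the VRAM width and the maximum memory clock to KFD.
 */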
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
				      struct kfd_local_mem_info *mem_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
					     ~((1ULL << 32) - 1);
	resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;

	memset(mem_info, 0, sizeof(*mem_info));
	if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
				adev->gmc.visible_vram_size;
	} else {
		mem_info->local_mem_size_public = 0;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size;
	}
	mem_info->vram_width = adev->gmc.vram_width;

	pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
			&adev->gmc.aper_base, &aper_limit,
			mem_info->local_mem_size_public,
			mem_info->local_mem_size_private);

	if (amdgpu_sriov_vf(adev))
		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
	else if (adev->powerplay.pp_funcs)
		mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
	else
		mem_info->mem_clk_max = 100;
}

uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	/* The sclk is in quanta of 10 kHz */
	if (amdgpu_sriov_vf(adev))
		return adev->clock.default_sclk / 100;
	else if (adev->powerplay.pp_funcs)
		return amdgpu_dpm_get_sclk(adev, false) / 100;
	else
		return 100;
}

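/* Copy this GPU's compute unit topology and limits into a kfd_cu_info structure. */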
void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_cu_info acu_info = adev->gfx.cu_info;

	memset(cu_info, 0, sizeof(*cu_info));
	if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
		return;

	cu_info->cu_active_number = acu_info.number;
	cu_info->cu_ao_mask = acu_info.ao_cu_mask;
	memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
	       sizeof(acu_info.bitmap));
	cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
	cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
	cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
	cu_info->simd_per_cu = acu_info.simd_per_cu;
	cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
	cu_info->wave_front_size = acu_info.wave_front_size;
	cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
	cu_info->lds_size = acu_info.lds_size;
}

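/* Look up information about a DMA-buf exported by amdgpu: the owning device,
 * BO size, metadata and memory domain flags. Returns -EINVAL for buffers
 * that amdgpu cannot handle.
 */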
int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
				  struct kgd_dev **dma_buf_kgd,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	uint64_t metadata_flags;
	int r = -EINVAL;

	dma_buf = dma_buf_get(dma_buf_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		goto out_put;

	obj = dma_buf->priv;
	if (obj->dev->driver != adev->ddev->driver)
		/* Can't handle buffers from different drivers */
		goto out_put;

	adev = obj->dev->dev_private;
	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		goto out_put;

	r = 0;
	if (dma_buf_kgd)
		*dma_buf_kgd = (struct kgd_dev *)adev;
	if (bo_size)
		*bo_size = amdgpu_bo_size(bo);
	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (metadata_buffer)
		r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
					   metadata_size, &metadata_flags);
	if (flags) {
		*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
			ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;

		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			*flags |= ALLOC_MEM_FLAGS_PUBLIC;
	}

out_put:
	dma_buf_put(dma_buf);
	return r;
}

uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
}

uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->gmc.xgmi.hive_id;
}

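/* Submit an indirect buffer on a compute or SDMA ring on behalf of KFD and
 * wait for the resulting fence.
 */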
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
				uint32_t vmid, uint64_t gpu_addr,
				uint32_t *ib_cmd, uint32_t ib_len)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ring *ring;
	struct dma_fence *f = NULL;
	int ret;

	switch (engine) {
	case KGD_ENGINE_MEC1:
		ring = &adev->gfx.compute_ring[0];
		break;
	case KGD_ENGINE_SDMA1:
		ring = &adev->sdma.instance[0].ring;
		break;
	case KGD_ENGINE_SDMA2:
		ring = &adev->sdma.instance[1].ring;
		break;
	default:
		pr_err("Invalid engine in IB submission: %d\n", engine);
		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_job_alloc(adev, 1, &job, NULL);
	if (ret)
		goto err;

	ib = &job->ibs[0];
	memset(ib, 0, sizeof(struct amdgpu_ib));

	ib->gpu_addr = gpu_addr;
	ib->ptr = ib_cmd;
	ib->length_dw = ib_len;
	/* This works for NO_HWS. TODO: need to handle without knowing VMID */
	job->vmid = vmid;

	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
	if (ret) {
		DRM_ERROR("amdgpu: failed to schedule IB.\n");
		goto err_ib_sched;
	}

	ret = dma_fence_wait(f, false);

err_ib_sched:
	dma_fence_put(f);
	amdgpu_job_free(job);
err:
	return ret;
}

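/* Switch the compute power profile on while compute work is active and off
 * when it goes idle, if powerplay supports profile switching.
 */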
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->switch_power_profile)
		amdgpu_dpm_switch_power_profile(adev,
						PP_SMC_POWER_PROFILE_COMPUTE,
						!idle);
}

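/* Check whether a VMID falls within the VMID range reserved for KFD compute. */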
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
	if (adev->kfd.dev) {
		if ((1 << vmid) & compute_vmid_bitmap)
			return true;
	}

	return false;
}

#ifndef CONFIG_HSA_AMD
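/*
 * Stub implementations used when KFD support (CONFIG_HSA_AMD) is not
 * compiled in, so callers elsewhere in amdgpu do not need #ifdefs.
 */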
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
					struct amdgpu_vm *vm)
{
}

struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
{
	return 0;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
{
	return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
	return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
{
	return NULL;
}
#endif