1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2014-2022 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/device.h>
25 #include <linux/export.h>
26 #include <linux/err.h>
27 #include <linux/fs.h>
28 #include <linux/file.h>
29 #include <linux/sched.h>
30 #include <linux/slab.h>
31 #include <linux/uaccess.h>
32 #include <linux/compat.h>
33 #include <uapi/linux/kfd_ioctl.h>
34 #include <linux/time.h>
35 #include <linux/mm.h>
36 #include <linux/mman.h>
37 #include <linux/ptrace.h>
38 #include <linux/dma-buf.h>
39 #include <linux/fdtable.h>
40 #include <linux/processor.h>
41 #include "kfd_priv.h"
42 #include "kfd_device_queue_manager.h"
43 #include "kfd_svm.h"
44 #include "amdgpu_amdkfd.h"
45 #include "kfd_smi_events.h"
46 #include "amdgpu_dma_buf.h"
47 
48 static long kfd_ioctl(struct file *, unsigned int, unsigned long);
49 static int kfd_open(struct inode *, struct file *);
50 static int kfd_release(struct inode *, struct file *);
51 static int kfd_mmap(struct file *, struct vm_area_struct *);
52 
53 static const char kfd_dev_name[] = "kfd";
54 
55 static const struct file_operations kfd_fops = {
56 	.owner = THIS_MODULE,
57 	.unlocked_ioctl = kfd_ioctl,
58 	.compat_ioctl = compat_ptr_ioctl,
59 	.open = kfd_open,
60 	.release = kfd_release,
61 	.mmap = kfd_mmap,
62 };
63 
64 static int kfd_char_dev_major = -1;
65 static struct class *kfd_class;
66 struct device *kfd_device;
67 
68 static inline struct kfd_process_device *kfd_lock_pdd_by_id(struct kfd_process *p, __u32 gpu_id)
69 {
70 	struct kfd_process_device *pdd;
71 
72 	mutex_lock(&p->mutex);
73 	pdd = kfd_process_device_data_by_id(p, gpu_id);
74 
75 	if (pdd)
76 		return pdd;
77 
78 	mutex_unlock(&p->mutex);
79 	return NULL;
80 }
81 
82 static inline void kfd_unlock_pdd(struct kfd_process_device *pdd)
83 {
84 	mutex_unlock(&pdd->process->mutex);
85 }
86 
87 int kfd_chardev_init(void)
88 {
89 	int err = 0;
90 
91 	kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
92 	err = kfd_char_dev_major;
93 	if (err < 0)
94 		goto err_register_chrdev;
95 
96 	kfd_class = class_create(kfd_dev_name);
97 	err = PTR_ERR(kfd_class);
98 	if (IS_ERR(kfd_class))
99 		goto err_class_create;
100 
101 	kfd_device = device_create(kfd_class, NULL,
102 					MKDEV(kfd_char_dev_major, 0),
103 					NULL, kfd_dev_name);
104 	err = PTR_ERR(kfd_device);
105 	if (IS_ERR(kfd_device))
106 		goto err_device_create;
107 
108 	return 0;
109 
110 err_device_create:
111 	class_destroy(kfd_class);
112 err_class_create:
113 	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
114 err_register_chrdev:
115 	return err;
116 }
117 
118 void kfd_chardev_exit(void)
119 {
120 	device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
121 	class_destroy(kfd_class);
122 	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
123 	kfd_device = NULL;
124 }
125 
126 
127 static int kfd_open(struct inode *inode, struct file *filep)
128 {
129 	struct kfd_process *process;
130 	bool is_32bit_user_mode;
131 
132 	if (iminor(inode) != 0)
133 		return -ENODEV;
134 
135 	is_32bit_user_mode = in_compat_syscall();
136 
137 	if (is_32bit_user_mode) {
138 		dev_warn(kfd_device,
139 			"Process %d (32-bit) failed to open /dev/kfd\n"
140 			"32-bit processes are not supported by amdkfd\n",
141 			current->pid);
142 		return -EPERM;
143 	}
144 
145 	process = kfd_create_process(filep);
146 	if (IS_ERR(process))
147 		return PTR_ERR(process);
148 
149 	if (kfd_is_locked()) {
150 		dev_dbg(kfd_device, "kfd is locked!\n"
151 				"process %d unreferenced", process->pasid);
152 		kfd_unref_process(process);
153 		return -EAGAIN;
154 	}
155 
156 	/* filep now owns the reference returned by kfd_create_process */
157 	filep->private_data = process;
158 
159 	dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
160 		process->pasid, process->is_32bit_user_mode);
161 
162 	return 0;
163 }
164 
165 static int kfd_release(struct inode *inode, struct file *filep)
166 {
167 	struct kfd_process *process = filep->private_data;
168 
169 	if (process)
170 		kfd_unref_process(process);
171 
172 	return 0;
173 }
174 
175 static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
176 					void *data)
177 {
178 	struct kfd_ioctl_get_version_args *args = data;
179 
180 	args->major_version = KFD_IOCTL_MAJOR_VERSION;
181 	args->minor_version = KFD_IOCTL_MINOR_VERSION;
182 
183 	return 0;
184 }
185 
186 static int set_queue_properties_from_user(struct queue_properties *q_properties,
187 				struct kfd_ioctl_create_queue_args *args)
188 {
189 	if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
190 		pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
191 		return -EINVAL;
192 	}
193 
194 	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
195 		pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
196 		return -EINVAL;
197 	}
198 
199 	if ((args->ring_base_address) &&
200 		(!access_ok((const void __user *) args->ring_base_address,
201 			sizeof(uint64_t)))) {
202 		pr_err("Can't access ring base address\n");
203 		return -EFAULT;
204 	}
205 
206 	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
207 		pr_err("Ring size must be a power of 2 or 0\n");
208 		return -EINVAL;
209 	}
210 
211 	if (!access_ok((const void __user *) args->read_pointer_address,
212 			sizeof(uint32_t))) {
213 		pr_err("Can't access read pointer\n");
214 		return -EFAULT;
215 	}
216 
217 	if (!access_ok((const void __user *) args->write_pointer_address,
218 			sizeof(uint32_t))) {
219 		pr_err("Can't access write pointer\n");
220 		return -EFAULT;
221 	}
222 
223 	if (args->eop_buffer_address &&
224 		!access_ok((const void __user *) args->eop_buffer_address,
225 			sizeof(uint32_t))) {
226 		pr_debug("Can't access eop buffer");
227 		return -EFAULT;
228 	}
229 
230 	if (args->ctx_save_restore_address &&
231 		!access_ok((const void __user *) args->ctx_save_restore_address,
232 			sizeof(uint32_t))) {
233 		pr_debug("Can't access ctx save restore buffer");
234 		return -EFAULT;
235 	}
236 
237 	q_properties->is_interop = false;
238 	q_properties->is_gws = false;
239 	q_properties->queue_percent = args->queue_percentage;
240 	q_properties->priority = args->queue_priority;
241 	q_properties->queue_address = args->ring_base_address;
242 	q_properties->queue_size = args->ring_size;
243 	q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
244 	q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
245 	q_properties->eop_ring_buffer_address = args->eop_buffer_address;
246 	q_properties->eop_ring_buffer_size = args->eop_buffer_size;
247 	q_properties->ctx_save_restore_area_address =
248 			args->ctx_save_restore_address;
249 	q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
250 	q_properties->ctl_stack_size = args->ctl_stack_size;
251 	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
252 		args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
253 		q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
254 	else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
255 		q_properties->type = KFD_QUEUE_TYPE_SDMA;
256 	else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_XGMI)
257 		q_properties->type = KFD_QUEUE_TYPE_SDMA_XGMI;
258 	else
259 		return -ENOTSUPP;
260 
261 	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
262 		q_properties->format = KFD_QUEUE_FORMAT_AQL;
263 	else
264 		q_properties->format = KFD_QUEUE_FORMAT_PM4;
265 
266 	pr_debug("Queue Percentage: %d, %d\n",
267 			q_properties->queue_percent, args->queue_percentage);
268 
269 	pr_debug("Queue Priority: %d, %d\n",
270 			q_properties->priority, args->queue_priority);
271 
272 	pr_debug("Queue Address: 0x%llX, 0x%llX\n",
273 			q_properties->queue_address, args->ring_base_address);
274 
275 	pr_debug("Queue Size: 0x%llX, %u\n",
276 			q_properties->queue_size, args->ring_size);
277 
278 	pr_debug("Queue r/w Pointers: %px, %px\n",
279 			q_properties->read_ptr,
280 			q_properties->write_ptr);
281 
282 	pr_debug("Queue Format: %d\n", q_properties->format);
283 
284 	pr_debug("Queue EOP: 0x%llX\n", q_properties->eop_ring_buffer_address);
285 
286 	pr_debug("Queue CTX save area: 0x%llX\n",
287 			q_properties->ctx_save_restore_area_address);
288 
289 	return 0;
290 }
291 
292 static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
293 					void *data)
294 {
295 	struct kfd_ioctl_create_queue_args *args = data;
296 	struct kfd_dev *dev;
297 	int err = 0;
298 	unsigned int queue_id;
299 	struct kfd_process_device *pdd;
300 	struct queue_properties q_properties;
301 	uint32_t doorbell_offset_in_process = 0;
302 	struct amdgpu_bo *wptr_bo = NULL;
303 
304 	memset(&q_properties, 0, sizeof(struct queue_properties));
305 
306 	pr_debug("Creating queue ioctl\n");
307 
308 	err = set_queue_properties_from_user(&q_properties, args);
309 	if (err)
310 		return err;
311 
312 	pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
313 
314 	mutex_lock(&p->mutex);
315 
316 	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
317 	if (!pdd) {
318 		pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
319 		err = -EINVAL;
320 		goto err_pdd;
321 	}
322 	dev = pdd->dev;
323 
324 	pdd = kfd_bind_process_to_device(dev, p);
325 	if (IS_ERR(pdd)) {
326 		err = -ESRCH;
327 		goto err_bind_process;
328 	}
329 
330 	if (!pdd->doorbell_index &&
331 	    kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) {
332 		err = -ENOMEM;
333 		goto err_alloc_doorbells;
334 	}
335 
336 	/* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work
337 	 * on unmapped queues for usermode queue oversubscription (no aggregated doorbell)
338 	 */
339 	if (dev->shared_resources.enable_mes &&
340 			((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK)
341 			>> AMDGPU_MES_API_VERSION_SHIFT) >= 2) {
342 		struct amdgpu_bo_va_mapping *wptr_mapping;
343 		struct amdgpu_vm *wptr_vm;
344 
345 		wptr_vm = drm_priv_to_vm(pdd->drm_priv);
346 		err = amdgpu_bo_reserve(wptr_vm->root.bo, false);
347 		if (err)
348 			goto err_wptr_map_gart;
349 
350 		wptr_mapping = amdgpu_vm_bo_lookup_mapping(
351 				wptr_vm, args->write_pointer_address >> PAGE_SHIFT);
352 		amdgpu_bo_unreserve(wptr_vm->root.bo);
353 		if (!wptr_mapping) {
354 			pr_err("Failed to lookup wptr bo\n");
355 			err = -EINVAL;
356 			goto err_wptr_map_gart;
357 		}
358 
359 		wptr_bo = wptr_mapping->bo_va->base.bo;
360 		if (wptr_bo->tbo.base.size > PAGE_SIZE) {
361 			pr_err("Requested GART mapping for wptr bo larger than one page\n");
362 			err = -EINVAL;
363 			goto err_wptr_map_gart;
364 		}
365 
366 		err = amdgpu_amdkfd_map_gtt_bo_to_gart(dev->adev, wptr_bo);
367 		if (err) {
368 			pr_err("Failed to map wptr bo to GART\n");
369 			goto err_wptr_map_gart;
370 		}
371 	}
372 
373 	pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n",
374 			p->pasid,
375 			dev->id);
376 
377 	err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id, wptr_bo,
378 			NULL, NULL, NULL, &doorbell_offset_in_process);
379 	if (err != 0)
380 		goto err_create_queue;
381 
382 	args->queue_id = queue_id;
383 
384 
385 	/* Return gpu_id as doorbell offset for mmap usage */
386 	args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
387 	args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
388 	if (KFD_IS_SOC15(dev))
389 		/* On SOC15 ASICs, include the doorbell offset within the
390 		 * process doorbell frame, which is 2 pages.
391 		 */
392 		args->doorbell_offset |= doorbell_offset_in_process;
393 
394 	mutex_unlock(&p->mutex);
395 
396 	pr_debug("Queue id %d was created successfully\n", args->queue_id);
397 
398 	pr_debug("Ring buffer address == 0x%016llX\n",
399 			args->ring_base_address);
400 
401 	pr_debug("Read ptr address    == 0x%016llX\n",
402 			args->read_pointer_address);
403 
404 	pr_debug("Write ptr address   == 0x%016llX\n",
405 			args->write_pointer_address);
406 
407 	return 0;
408 
409 err_create_queue:
410 	if (wptr_bo)
411 		amdgpu_amdkfd_free_gtt_mem(dev->adev, wptr_bo);
412 err_wptr_map_gart:
413 err_alloc_doorbells:
414 err_bind_process:
415 err_pdd:
416 	mutex_unlock(&p->mutex);
417 	return err;
418 }
419 
420 static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
421 					void *data)
422 {
423 	int retval;
424 	struct kfd_ioctl_destroy_queue_args *args = data;
425 
426 	pr_debug("Destroying queue id %d for pasid 0x%x\n",
427 				args->queue_id,
428 				p->pasid);
429 
430 	mutex_lock(&p->mutex);
431 
432 	retval = pqm_destroy_queue(&p->pqm, args->queue_id);
433 
434 	mutex_unlock(&p->mutex);
435 	return retval;
436 }
437 
438 static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
439 					void *data)
440 {
441 	int retval;
442 	struct kfd_ioctl_update_queue_args *args = data;
443 	struct queue_properties properties;
444 
445 	if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
446 		pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
447 		return -EINVAL;
448 	}
449 
450 	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
451 		pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
452 		return -EINVAL;
453 	}
454 
455 	if ((args->ring_base_address) &&
456 		(!access_ok((const void __user *) args->ring_base_address,
457 			sizeof(uint64_t)))) {
458 		pr_err("Can't access ring base address\n");
459 		return -EFAULT;
460 	}
461 
462 	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
463 		pr_err("Ring size must be a power of 2 or 0\n");
464 		return -EINVAL;
465 	}
466 
467 	properties.queue_address = args->ring_base_address;
468 	properties.queue_size = args->ring_size;
469 	properties.queue_percent = args->queue_percentage;
470 	properties.priority = args->queue_priority;
471 
472 	pr_debug("Updating queue id %d for pasid 0x%x\n",
473 			args->queue_id, p->pasid);
474 
475 	mutex_lock(&p->mutex);
476 
477 	retval = pqm_update_queue_properties(&p->pqm, args->queue_id, &properties);
478 
479 	mutex_unlock(&p->mutex);
480 
481 	return retval;
482 }
483 
484 static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
485 					void *data)
486 {
487 	int retval;
488 	const int max_num_cus = 1024;
489 	struct kfd_ioctl_set_cu_mask_args *args = data;
490 	struct mqd_update_info minfo = {0};
491 	uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
492 	size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);
493 
494 	if ((args->num_cu_mask % 32) != 0) {
495 		pr_debug("num_cu_mask 0x%x must be a multiple of 32",
496 				args->num_cu_mask);
497 		return -EINVAL;
498 	}
499 
500 	minfo.cu_mask.count = args->num_cu_mask;
501 	if (minfo.cu_mask.count == 0) {
502 		pr_debug("CU mask cannot be 0");
503 		return -EINVAL;
504 	}
505 
506 	/* To prevent an unreasonably large CU mask size, set an arbitrary
507 	 * limit of max_num_cus bits.  We can then just drop any CU mask bits
508 	 * past max_num_cus bits and just use the first max_num_cus bits.
509 	 */
510 	if (minfo.cu_mask.count > max_num_cus) {
511 		pr_debug("CU mask cannot be greater than 1024 bits");
512 		minfo.cu_mask.count = max_num_cus;
513 		cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
514 	}
515 
516 	minfo.cu_mask.ptr = kzalloc(cu_mask_size, GFP_KERNEL);
517 	if (!minfo.cu_mask.ptr)
518 		return -ENOMEM;
519 
520 	retval = copy_from_user(minfo.cu_mask.ptr, cu_mask_ptr, cu_mask_size);
521 	if (retval) {
522 		pr_debug("Could not copy CU mask from userspace");
523 		retval = -EFAULT;
524 		goto out;
525 	}
526 
527 	minfo.update_flag = UPDATE_FLAG_CU_MASK;
528 
529 	mutex_lock(&p->mutex);
530 
531 	retval = pqm_update_mqd(&p->pqm, args->queue_id, &minfo);
532 
533 	mutex_unlock(&p->mutex);
534 
535 out:
536 	kfree(minfo.cu_mask.ptr);
537 	return retval;
538 }
539 
540 static int kfd_ioctl_get_queue_wave_state(struct file *filep,
541 					  struct kfd_process *p, void *data)
542 {
543 	struct kfd_ioctl_get_queue_wave_state_args *args = data;
544 	int r;
545 
546 	mutex_lock(&p->mutex);
547 
548 	r = pqm_get_wave_state(&p->pqm, args->queue_id,
549 			       (void __user *)args->ctl_stack_address,
550 			       &args->ctl_stack_used_size,
551 			       &args->save_area_used_size);
552 
553 	mutex_unlock(&p->mutex);
554 
555 	return r;
556 }
557 
558 static int kfd_ioctl_set_memory_policy(struct file *filep,
559 					struct kfd_process *p, void *data)
560 {
561 	struct kfd_ioctl_set_memory_policy_args *args = data;
562 	int err = 0;
563 	struct kfd_process_device *pdd;
564 	enum cache_policy default_policy, alternate_policy;
565 
566 	if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
567 	    && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
568 		return -EINVAL;
569 	}
570 
571 	if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
572 	    && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
573 		return -EINVAL;
574 	}
575 
576 	mutex_lock(&p->mutex);
577 	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
578 	if (!pdd) {
579 		pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
580 		err = -EINVAL;
581 		goto err_pdd;
582 	}
583 
584 	pdd = kfd_bind_process_to_device(pdd->dev, p);
585 	if (IS_ERR(pdd)) {
586 		err = -ESRCH;
587 		goto out;
588 	}
589 
590 	default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
591 			 ? cache_policy_coherent : cache_policy_noncoherent;
592 
593 	alternate_policy =
594 		(args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
595 		   ? cache_policy_coherent : cache_policy_noncoherent;
596 
597 	if (!pdd->dev->dqm->ops.set_cache_memory_policy(pdd->dev->dqm,
598 				&pdd->qpd,
599 				default_policy,
600 				alternate_policy,
601 				(void __user *)args->alternate_aperture_base,
602 				args->alternate_aperture_size))
603 		err = -EINVAL;
604 
605 out:
606 err_pdd:
607 	mutex_unlock(&p->mutex);
608 
609 	return err;
610 }
611 
612 static int kfd_ioctl_set_trap_handler(struct file *filep,
613 					struct kfd_process *p, void *data)
614 {
615 	struct kfd_ioctl_set_trap_handler_args *args = data;
616 	int err = 0;
617 	struct kfd_process_device *pdd;
618 
619 	mutex_lock(&p->mutex);
620 
621 	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
622 	if (!pdd) {
623 		err = -EINVAL;
624 		goto err_pdd;
625 	}
626 
627 	pdd = kfd_bind_process_to_device(pdd->dev, p);
628 	if (IS_ERR(pdd)) {
629 		err = -ESRCH;
630 		goto out;
631 	}
632 
633 	kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr);
634 
635 out:
636 err_pdd:
637 	mutex_unlock(&p->mutex);
638 
639 	return err;
640 }
641 
642 static int kfd_ioctl_dbg_register(struct file *filep,
643 				struct kfd_process *p, void *data)
644 {
645 	return -EPERM;
646 }
647 
648 static int kfd_ioctl_dbg_unregister(struct file *filep,
649 				struct kfd_process *p, void *data)
650 {
651 	return -EPERM;
652 }
653 
654 static int kfd_ioctl_dbg_address_watch(struct file *filep,
655 					struct kfd_process *p, void *data)
656 {
657 	return -EPERM;
658 }
659 
660 /* Parse and generate fixed size data structure for wave control */
661 static int kfd_ioctl_dbg_wave_control(struct file *filep,
662 					struct kfd_process *p, void *data)
663 {
664 	return -EPERM;
665 }
666 
667 static int kfd_ioctl_get_clock_counters(struct file *filep,
668 				struct kfd_process *p, void *data)
669 {
670 	struct kfd_ioctl_get_clock_counters_args *args = data;
671 	struct kfd_process_device *pdd;
672 
673 	mutex_lock(&p->mutex);
674 	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
675 	mutex_unlock(&p->mutex);
676 	if (pdd)
677 		/* Reading GPU clock counter from KGD */
678 		args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(pdd->dev->adev);
679 	else
680 		/* Node without GPU resource */
681 		args->gpu_clock_counter = 0;
682 
683 	/* No access to rdtsc. Using raw monotonic time */
684 	args->cpu_clock_counter = ktime_get_raw_ns();
685 	args->system_clock_counter = ktime_get_boottime_ns();
686 
687 	/* Since the counter is in nano-seconds we use 1GHz frequency */
688 	args->system_clock_freq = 1000000000;
689 
690 	return 0;
691 }
692 
693 
694 static int kfd_ioctl_get_process_apertures(struct file *filp,
695 				struct kfd_process *p, void *data)
696 {
697 	struct kfd_ioctl_get_process_apertures_args *args = data;
698 	struct kfd_process_device_apertures *pAperture;
699 	int i;
700 
701 	dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
702 
703 	args->num_of_nodes = 0;
704 
705 	mutex_lock(&p->mutex);
706 	/* Run over all pdd of the process */
707 	for (i = 0; i < p->n_pdds; i++) {
708 		struct kfd_process_device *pdd = p->pdds[i];
709 
710 		pAperture =
711 			&args->process_apertures[args->num_of_nodes];
712 		pAperture->gpu_id = pdd->dev->id;
713 		pAperture->lds_base = pdd->lds_base;
714 		pAperture->lds_limit = pdd->lds_limit;
715 		pAperture->gpuvm_base = pdd->gpuvm_base;
716 		pAperture->gpuvm_limit = pdd->gpuvm_limit;
717 		pAperture->scratch_base = pdd->scratch_base;
718 		pAperture->scratch_limit = pdd->scratch_limit;
719 
720 		dev_dbg(kfd_device,
721 			"node id %u\n", args->num_of_nodes);
722 		dev_dbg(kfd_device,
723 			"gpu id %u\n", pdd->dev->id);
724 		dev_dbg(kfd_device,
725 			"lds_base %llX\n", pdd->lds_base);
726 		dev_dbg(kfd_device,
727 			"lds_limit %llX\n", pdd->lds_limit);
728 		dev_dbg(kfd_device,
729 			"gpuvm_base %llX\n", pdd->gpuvm_base);
730 		dev_dbg(kfd_device,
731 			"gpuvm_limit %llX\n", pdd->gpuvm_limit);
732 		dev_dbg(kfd_device,
733 			"scratch_base %llX\n", pdd->scratch_base);
734 		dev_dbg(kfd_device,
735 			"scratch_limit %llX\n", pdd->scratch_limit);
736 
737 		if (++args->num_of_nodes >= NUM_OF_SUPPORTED_GPUS)
738 			break;
739 	}
740 	mutex_unlock(&p->mutex);
741 
742 	return 0;
743 }
744 
745 static int kfd_ioctl_get_process_apertures_new(struct file *filp,
746 				struct kfd_process *p, void *data)
747 {
748 	struct kfd_ioctl_get_process_apertures_new_args *args = data;
749 	struct kfd_process_device_apertures *pa;
750 	int ret;
751 	int i;
752 
753 	dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
754 
755 	if (args->num_of_nodes == 0) {
756 		/* Return number of nodes, so that user space can alloacate
757 		 * sufficient memory
758 		 */
759 		mutex_lock(&p->mutex);
760 		args->num_of_nodes = p->n_pdds;
761 		goto out_unlock;
762 	}
763 
764 	/* Fill in process-aperture information for all available
765 	 * nodes, but not more than args->num_of_nodes as that is
766 	 * the amount of memory allocated by user
767 	 */
768 	pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
769 				args->num_of_nodes), GFP_KERNEL);
770 	if (!pa)
771 		return -ENOMEM;
772 
773 	mutex_lock(&p->mutex);
774 
775 	if (!p->n_pdds) {
776 		args->num_of_nodes = 0;
777 		kfree(pa);
778 		goto out_unlock;
779 	}
780 
781 	/* Run over all pdd of the process */
782 	for (i = 0; i < min(p->n_pdds, args->num_of_nodes); i++) {
783 		struct kfd_process_device *pdd = p->pdds[i];
784 
785 		pa[i].gpu_id = pdd->dev->id;
786 		pa[i].lds_base = pdd->lds_base;
787 		pa[i].lds_limit = pdd->lds_limit;
788 		pa[i].gpuvm_base = pdd->gpuvm_base;
789 		pa[i].gpuvm_limit = pdd->gpuvm_limit;
790 		pa[i].scratch_base = pdd->scratch_base;
791 		pa[i].scratch_limit = pdd->scratch_limit;
792 
793 		dev_dbg(kfd_device,
794 			"gpu id %u\n", pdd->dev->id);
795 		dev_dbg(kfd_device,
796 			"lds_base %llX\n", pdd->lds_base);
797 		dev_dbg(kfd_device,
798 			"lds_limit %llX\n", pdd->lds_limit);
799 		dev_dbg(kfd_device,
800 			"gpuvm_base %llX\n", pdd->gpuvm_base);
801 		dev_dbg(kfd_device,
802 			"gpuvm_limit %llX\n", pdd->gpuvm_limit);
803 		dev_dbg(kfd_device,
804 			"scratch_base %llX\n", pdd->scratch_base);
805 		dev_dbg(kfd_device,
806 			"scratch_limit %llX\n", pdd->scratch_limit);
807 	}
808 	mutex_unlock(&p->mutex);
809 
810 	args->num_of_nodes = i;
811 	ret = copy_to_user(
812 			(void __user *)args->kfd_process_device_apertures_ptr,
813 			pa,
814 			(i * sizeof(struct kfd_process_device_apertures)));
815 	kfree(pa);
816 	return ret ? -EFAULT : 0;
817 
818 out_unlock:
819 	mutex_unlock(&p->mutex);
820 	return 0;
821 }
822 
823 static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
824 					void *data)
825 {
826 	struct kfd_ioctl_create_event_args *args = data;
827 	int err;
828 
829 	/* For dGPUs the event page is allocated in user mode. The
830 	 * handle is passed to KFD with the first call to this IOCTL
831 	 * through the event_page_offset field.
832 	 */
833 	if (args->event_page_offset) {
834 		mutex_lock(&p->mutex);
835 		err = kfd_kmap_event_page(p, args->event_page_offset);
836 		mutex_unlock(&p->mutex);
837 		if (err)
838 			return err;
839 	}
840 
841 	err = kfd_event_create(filp, p, args->event_type,
842 				args->auto_reset != 0, args->node_id,
843 				&args->event_id, &args->event_trigger_data,
844 				&args->event_page_offset,
845 				&args->event_slot_index);
846 
847 	pr_debug("Created event (id:0x%08x) (%s)\n", args->event_id, __func__);
848 	return err;
849 }
850 
851 static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p,
852 					void *data)
853 {
854 	struct kfd_ioctl_destroy_event_args *args = data;
855 
856 	return kfd_event_destroy(p, args->event_id);
857 }
858 
859 static int kfd_ioctl_set_event(struct file *filp, struct kfd_process *p,
860 				void *data)
861 {
862 	struct kfd_ioctl_set_event_args *args = data;
863 
864 	return kfd_set_event(p, args->event_id);
865 }
866 
867 static int kfd_ioctl_reset_event(struct file *filp, struct kfd_process *p,
868 				void *data)
869 {
870 	struct kfd_ioctl_reset_event_args *args = data;
871 
872 	return kfd_reset_event(p, args->event_id);
873 }
874 
875 static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
876 				void *data)
877 {
878 	struct kfd_ioctl_wait_events_args *args = data;
879 
880 	return kfd_wait_on_events(p, args->num_events,
881 			(void __user *)args->events_ptr,
882 			(args->wait_for_all != 0),
883 			&args->timeout, &args->wait_result);
884 }
885 static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
886 					struct kfd_process *p, void *data)
887 {
888 	struct kfd_ioctl_set_scratch_backing_va_args *args = data;
889 	struct kfd_process_device *pdd;
890 	struct kfd_dev *dev;
891 	long err;
892 
893 	mutex_lock(&p->mutex);
894 	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
895 	if (!pdd) {
896 		err = -EINVAL;
897 		goto err_pdd;
898 	}
899 	dev = pdd->dev;
900 
901 	pdd = kfd_bind_process_to_device(dev, p);
902 	if (IS_ERR(pdd)) {
903 		err = PTR_ERR(pdd);
904 		goto bind_process_to_device_fail;
905 	}
906 
907 	pdd->qpd.sh_hidden_private_base = args->va_addr;
908 
909 	mutex_unlock(&p->mutex);
910 
911 	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
912 	    pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
913 		dev->kfd2kgd->set_scratch_backing_va(
914 			dev->adev, args->va_addr, pdd->qpd.vmid);
915 
916 	return 0;
917 
918 bind_process_to_device_fail:
919 err_pdd:
920 	mutex_unlock(&p->mutex);
921 	return err;
922 }
923 
924 static int kfd_ioctl_get_tile_config(struct file *filep,
925 		struct kfd_process *p, void *data)
926 {
927 	struct kfd_ioctl_get_tile_config_args *args = data;
928 	struct kfd_process_device *pdd;
929 	struct tile_config config;
930 	int err = 0;
931 
932 	mutex_lock(&p->mutex);
933 	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
934 	mutex_unlock(&p->mutex);
935 	if (!pdd)
936 		return -EINVAL;
937 
938 	amdgpu_amdkfd_get_tile_config(pdd->dev->adev, &config);
939 
940 	args->gb_addr_config = config.gb_addr_config;
941 	args->num_banks = config.num_banks;
942 	args->num_ranks = config.num_ranks;
943 
944 	if (args->num_tile_configs > config.num_tile_configs)
945 		args->num_tile_configs = config.num_tile_configs;
946 	err = copy_to_user((void __user *)args->tile_config_ptr,
947 			config.tile_config_ptr,
948 			args->num_tile_configs * sizeof(uint32_t));
949 	if (err) {
950 		args->num_tile_configs = 0;
951 		return -EFAULT;
952 	}
953 
954 	if (args->num_macro_tile_configs > config.num_macro_tile_configs)
955 		args->num_macro_tile_configs =
956 				config.num_macro_tile_configs;
957 	err = copy_to_user((void __user *)args->macro_tile_config_ptr,
958 			config.macro_tile_config_ptr,
959 			args->num_macro_tile_configs * sizeof(uint32_t));
960 	if (err) {
961 		args->num_macro_tile_configs = 0;
962 		return -EFAULT;
963 	}
964 
965 	return 0;
966 }
967 
968 static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p,
969 				void *data)
970 {
971 	struct kfd_ioctl_acquire_vm_args *args = data;
972 	struct kfd_process_device *pdd;
973 	struct file *drm_file;
974 	int ret;
975 
976 	drm_file = fget(args->drm_fd);
977 	if (!drm_file)
978 		return -EINVAL;
979 
980 	mutex_lock(&p->mutex);
981 	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
982 	if (!pdd) {
983 		ret = -EINVAL;
984 		goto err_pdd;
985 	}
986 
987 	if (pdd->drm_file) {
988 		ret = pdd->drm_file == drm_file ? 0 : -EBUSY;
989 		goto err_drm_file;
990 	}
991 
992 	ret = kfd_process_device_init_vm(pdd, drm_file);
993 	if (ret)
994 		goto err_unlock;
995 
996 	/* On success, the PDD keeps the drm_file reference */
997 	mutex_unlock(&p->mutex);
998 
999 	return 0;
1000 
1001 err_unlock:
1002 err_pdd:
1003 err_drm_file:
1004 	mutex_unlock(&p->mutex);
1005 	fput(drm_file);
1006 	return ret;
1007 }
1008 
1009 bool kfd_dev_is_large_bar(struct kfd_dev *dev)
1010 {
1011 	if (debug_largebar) {
1012 		pr_debug("Simulate large-bar allocation on non large-bar machine\n");
1013 		return true;
1014 	}
1015 
1016 	if (dev->use_iommu_v2)
1017 		return false;
1018 
1019 	if (dev->local_mem_info.local_mem_size_private == 0 &&
1020 			dev->local_mem_info.local_mem_size_public > 0)
1021 		return true;
1022 	return false;
1023 }
1024 
1025 static int kfd_ioctl_get_available_memory(struct file *filep,
1026 					  struct kfd_process *p, void *data)
1027 {
1028 	struct kfd_ioctl_get_available_memory_args *args = data;
1029 	struct kfd_process_device *pdd = kfd_lock_pdd_by_id(p, args->gpu_id);
1030 
1031 	if (!pdd)
1032 		return -EINVAL;
1033 	args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev);
1034 	kfd_unlock_pdd(pdd);
1035 	return 0;
1036 }
1037 
1038 static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
1039 					struct kfd_process *p, void *data)
1040 {
1041 	struct kfd_ioctl_alloc_memory_of_gpu_args *args = data;
1042 	struct kfd_process_device *pdd;
1043 	void *mem;
1044 	struct kfd_dev *dev;
1045 	int idr_handle;
1046 	long err;
1047 	uint64_t offset = args->mmap_offset;
1048 	uint32_t flags = args->flags;
1049 
1050 	if (args->size == 0)
1051 		return -EINVAL;
1052 
1053 #if IS_ENABLED(CONFIG_HSA_AMD_SVM)
1054 	/* Flush pending deferred work to avoid racing with deferred actions
1055 	 * from previous memory map changes (e.g. munmap).
1056 	 */
1057 	svm_range_list_lock_and_flush_work(&p->svms, current->mm);
1058 	mutex_lock(&p->svms.lock);
1059 	mmap_write_unlock(current->mm);
1060 	if (interval_tree_iter_first(&p->svms.objects,
1061 				     args->va_addr >> PAGE_SHIFT,
1062 				     (args->va_addr + args->size - 1) >> PAGE_SHIFT)) {
1063 		pr_err("Address: 0x%llx already allocated by SVM\n",
1064 			args->va_addr);
1065 		mutex_unlock(&p->svms.lock);
1066 		return -EADDRINUSE;
1067 	}
1068 
1069 	/* When register user buffer check if it has been registered by svm by
1070 	 * buffer cpu virtual address.
1071 	 */
1072 	if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) &&
1073 	    interval_tree_iter_first(&p->svms.objects,
1074 				     args->mmap_offset >> PAGE_SHIFT,
1075 				     (args->mmap_offset  + args->size - 1) >> PAGE_SHIFT)) {
1076 		pr_err("User Buffer Address: 0x%llx already allocated by SVM\n",
1077 			args->mmap_offset);
1078 		mutex_unlock(&p->svms.lock);
1079 		return -EADDRINUSE;
1080 	}
1081 
1082 	mutex_unlock(&p->svms.lock);
1083 #endif
1084 	mutex_lock(&p->mutex);
1085 	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
1086 	if (!pdd) {
1087 		err = -EINVAL;
1088 		goto err_pdd;
1089 	}
1090 
1091 	dev = pdd->dev;
1092 
1093 	if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) &&
1094 		(flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) &&
1095 		!kfd_dev_is_large_bar(dev)) {
1096 		pr_err("Alloc host visible vram on small bar is not allowed\n");
1097 		err = -EINVAL;
1098 		goto err_large_bar;
1099 	}
1100 
1101 	pdd = kfd_bind_process_to_device(dev, p);
1102 	if (IS_ERR(pdd)) {
1103 		err = PTR_ERR(pdd);
1104 		goto err_unlock;
1105 	}
1106 
1107 	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
1108 		if (args->size != kfd_doorbell_process_slice(dev)) {
1109 			err = -EINVAL;
1110 			goto err_unlock;
1111 		}
1112 		offset = kfd_get_process_doorbells(pdd);
1113 		if (!offset) {
1114 			err = -ENOMEM;
1115 			goto err_unlock;
1116 		}
1117 	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
1118 		if (args->size != PAGE_SIZE) {
1119 			err = -EINVAL;
1120 			goto err_unlock;
1121 		}
1122 		offset = dev->adev->rmmio_remap.bus_addr;
1123 		if (!offset) {
1124 			err = -ENOMEM;
1125 			goto err_unlock;
1126 		}
1127 	}
1128 
1129 	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1130 		dev->adev, args->va_addr, args->size,
1131 		pdd->drm_priv, (struct kgd_mem **) &mem, &offset,
1132 		flags, false);
1133 
1134 	if (err)
1135 		goto err_unlock;
1136 
1137 	idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
1138 	if (idr_handle < 0) {
1139 		err = -EFAULT;
1140 		goto err_free;
1141 	}
1142 
1143 	/* Update the VRAM usage count */
1144 	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1145 		uint64_t size = args->size;
1146 
1147 		if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM)
1148 			size >>= 1;
1149 		WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size));
1150 	}
1151 
1152 	mutex_unlock(&p->mutex);
1153 
1154 	args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
1155 	args->mmap_offset = offset;
1156 
1157 	/* MMIO is mapped through kfd device
1158 	 * Generate a kfd mmap offset
1159 	 */
1160 	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
1161 		args->mmap_offset = KFD_MMAP_TYPE_MMIO
1162 					| KFD_MMAP_GPU_ID(args->gpu_id);
1163 
1164 	return 0;
1165 
1166 err_free:
1167 	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem,
1168 					       pdd->drm_priv, NULL);
1169 err_unlock:
1170 err_pdd:
1171 err_large_bar:
1172 	mutex_unlock(&p->mutex);
1173 	return err;
1174 }
1175 
1176 static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
1177 					struct kfd_process *p, void *data)
1178 {
1179 	struct kfd_ioctl_free_memory_of_gpu_args *args = data;
1180 	struct kfd_process_device *pdd;
1181 	void *mem;
1182 	int ret;
1183 	uint64_t size = 0;
1184 
1185 	mutex_lock(&p->mutex);
1186 	/*
1187 	 * Safeguard to prevent user space from freeing signal BO.
1188 	 * It will be freed at process termination.
1189 	 */
1190 	if (p->signal_handle && (p->signal_handle == args->handle)) {
1191 		pr_err("Free signal BO is not allowed\n");
1192 		ret = -EPERM;
1193 		goto err_unlock;
1194 	}
1195 
1196 	pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1197 	if (!pdd) {
1198 		pr_err("Process device data doesn't exist\n");
1199 		ret = -EINVAL;
1200 		goto err_pdd;
1201 	}
1202 
1203 	mem = kfd_process_device_translate_handle(
1204 		pdd, GET_IDR_HANDLE(args->handle));
1205 	if (!mem) {
1206 		ret = -EINVAL;
1207 		goto err_unlock;
1208 	}
1209 
1210 	ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev,
1211 				(struct kgd_mem *)mem, pdd->drm_priv, &size);
1212 
1213 	/* If freeing the buffer failed, leave the handle in place for
1214 	 * clean-up during process tear-down.
1215 	 */
1216 	if (!ret)
1217 		kfd_process_device_remove_obj_handle(
1218 			pdd, GET_IDR_HANDLE(args->handle));
1219 
1220 	WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);
1221 
1222 err_unlock:
1223 err_pdd:
1224 	mutex_unlock(&p->mutex);
1225 	return ret;
1226 }
1227 
1228 static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
1229 					struct kfd_process *p, void *data)
1230 {
1231 	struct kfd_ioctl_map_memory_to_gpu_args *args = data;
1232 	struct kfd_process_device *pdd, *peer_pdd;
1233 	void *mem;
1234 	struct kfd_dev *dev;
1235 	long err = 0;
1236 	int i;
1237 	uint32_t *devices_arr = NULL;
1238 
1239 	if (!args->n_devices) {
1240 		pr_debug("Device IDs array empty\n");
1241 		return -EINVAL;
1242 	}
1243 	if (args->n_success > args->n_devices) {
1244 		pr_debug("n_success exceeds n_devices\n");
1245 		return -EINVAL;
1246 	}
1247 
1248 	devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
1249 				    GFP_KERNEL);
1250 	if (!devices_arr)
1251 		return -ENOMEM;
1252 
1253 	err = copy_from_user(devices_arr,
1254 			     (void __user *)args->device_ids_array_ptr,
1255 			     args->n_devices * sizeof(*devices_arr));
1256 	if (err != 0) {
1257 		err = -EFAULT;
1258 		goto copy_from_user_failed;
1259 	}
1260 
1261 	mutex_lock(&p->mutex);
1262 	pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1263 	if (!pdd) {
1264 		err = -EINVAL;
1265 		goto get_process_device_data_failed;
1266 	}
1267 	dev = pdd->dev;
1268 
1269 	pdd = kfd_bind_process_to_device(dev, p);
1270 	if (IS_ERR(pdd)) {
1271 		err = PTR_ERR(pdd);
1272 		goto bind_process_to_device_failed;
1273 	}
1274 
1275 	mem = kfd_process_device_translate_handle(pdd,
1276 						GET_IDR_HANDLE(args->handle));
1277 	if (!mem) {
1278 		err = -ENOMEM;
1279 		goto get_mem_obj_from_handle_failed;
1280 	}
1281 
1282 	for (i = args->n_success; i < args->n_devices; i++) {
1283 		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1284 		if (!peer_pdd) {
1285 			pr_debug("Getting device by id failed for 0x%x\n",
1286 				 devices_arr[i]);
1287 			err = -EINVAL;
1288 			goto get_mem_obj_from_handle_failed;
1289 		}
1290 
1291 		peer_pdd = kfd_bind_process_to_device(peer_pdd->dev, p);
1292 		if (IS_ERR(peer_pdd)) {
1293 			err = PTR_ERR(peer_pdd);
1294 			goto get_mem_obj_from_handle_failed;
1295 		}
1296 
1297 		err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1298 			peer_pdd->dev->adev, (struct kgd_mem *)mem,
1299 			peer_pdd->drm_priv);
1300 		if (err) {
1301 			struct pci_dev *pdev = peer_pdd->dev->adev->pdev;
1302 
1303 			dev_err(dev->adev->dev,
1304 			       "Failed to map peer:%04x:%02x:%02x.%d mem_domain:%d\n",
1305 			       pci_domain_nr(pdev->bus),
1306 			       pdev->bus->number,
1307 			       PCI_SLOT(pdev->devfn),
1308 			       PCI_FUNC(pdev->devfn),
1309 			       ((struct kgd_mem *)mem)->domain);
1310 			goto map_memory_to_gpu_failed;
1311 		}
1312 		args->n_success = i+1;
1313 	}
1314 
1315 	err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
1316 	if (err) {
1317 		pr_debug("Sync memory failed, wait interrupted by user signal\n");
1318 		goto sync_memory_failed;
1319 	}
1320 
1321 	mutex_unlock(&p->mutex);
1322 
1323 	/* Flush TLBs after waiting for the page table updates to complete */
1324 	for (i = 0; i < args->n_devices; i++) {
1325 		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1326 		if (WARN_ON_ONCE(!peer_pdd))
1327 			continue;
1328 		kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
1329 	}
1330 	kfree(devices_arr);
1331 
1332 	return err;
1333 
1334 get_process_device_data_failed:
1335 bind_process_to_device_failed:
1336 get_mem_obj_from_handle_failed:
1337 map_memory_to_gpu_failed:
1338 sync_memory_failed:
1339 	mutex_unlock(&p->mutex);
1340 copy_from_user_failed:
1341 	kfree(devices_arr);
1342 
1343 	return err;
1344 }
1345 
1346 static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
1347 					struct kfd_process *p, void *data)
1348 {
1349 	struct kfd_ioctl_unmap_memory_from_gpu_args *args = data;
1350 	struct kfd_process_device *pdd, *peer_pdd;
1351 	void *mem;
1352 	long err = 0;
1353 	uint32_t *devices_arr = NULL, i;
1354 	bool flush_tlb;
1355 
1356 	if (!args->n_devices) {
1357 		pr_debug("Device IDs array empty\n");
1358 		return -EINVAL;
1359 	}
1360 	if (args->n_success > args->n_devices) {
1361 		pr_debug("n_success exceeds n_devices\n");
1362 		return -EINVAL;
1363 	}
1364 
1365 	devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
1366 				    GFP_KERNEL);
1367 	if (!devices_arr)
1368 		return -ENOMEM;
1369 
1370 	err = copy_from_user(devices_arr,
1371 			     (void __user *)args->device_ids_array_ptr,
1372 			     args->n_devices * sizeof(*devices_arr));
1373 	if (err != 0) {
1374 		err = -EFAULT;
1375 		goto copy_from_user_failed;
1376 	}
1377 
1378 	mutex_lock(&p->mutex);
1379 	pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1380 	if (!pdd) {
1381 		err = -EINVAL;
1382 		goto bind_process_to_device_failed;
1383 	}
1384 
1385 	mem = kfd_process_device_translate_handle(pdd,
1386 						GET_IDR_HANDLE(args->handle));
1387 	if (!mem) {
1388 		err = -ENOMEM;
1389 		goto get_mem_obj_from_handle_failed;
1390 	}
1391 
1392 	for (i = args->n_success; i < args->n_devices; i++) {
1393 		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1394 		if (!peer_pdd) {
1395 			err = -EINVAL;
1396 			goto get_mem_obj_from_handle_failed;
1397 		}
1398 		err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1399 			peer_pdd->dev->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv);
1400 		if (err) {
1401 			pr_err("Failed to unmap from gpu %d/%d\n",
1402 			       i, args->n_devices);
1403 			goto unmap_memory_from_gpu_failed;
1404 		}
1405 		args->n_success = i+1;
1406 	}
1407 
1408 	flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev);
1409 	if (flush_tlb) {
1410 		err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
1411 				(struct kgd_mem *) mem, true);
1412 		if (err) {
1413 			pr_debug("Sync memory failed, wait interrupted by user signal\n");
1414 			goto sync_memory_failed;
1415 		}
1416 	}
1417 	mutex_unlock(&p->mutex);
1418 
1419 	if (flush_tlb) {
1420 		/* Flush TLBs after waiting for the page table updates to complete */
1421 		for (i = 0; i < args->n_devices; i++) {
1422 			peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
1423 			if (WARN_ON_ONCE(!peer_pdd))
1424 				continue;
1425 			kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
1426 		}
1427 	}
1428 	kfree(devices_arr);
1429 
1430 	return 0;
1431 
1432 bind_process_to_device_failed:
1433 get_mem_obj_from_handle_failed:
1434 unmap_memory_from_gpu_failed:
1435 sync_memory_failed:
1436 	mutex_unlock(&p->mutex);
1437 copy_from_user_failed:
1438 	kfree(devices_arr);
1439 	return err;
1440 }
1441 
1442 static int kfd_ioctl_alloc_queue_gws(struct file *filep,
1443 		struct kfd_process *p, void *data)
1444 {
1445 	int retval;
1446 	struct kfd_ioctl_alloc_queue_gws_args *args = data;
1447 	struct queue *q;
1448 	struct kfd_dev *dev;
1449 
1450 	mutex_lock(&p->mutex);
1451 	q = pqm_get_user_queue(&p->pqm, args->queue_id);
1452 
1453 	if (q) {
1454 		dev = q->device;
1455 	} else {
1456 		retval = -EINVAL;
1457 		goto out_unlock;
1458 	}
1459 
1460 	if (!dev->gws) {
1461 		retval = -ENODEV;
1462 		goto out_unlock;
1463 	}
1464 
1465 	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
1466 		retval = -ENODEV;
1467 		goto out_unlock;
1468 	}
1469 
1470 	retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
1471 	mutex_unlock(&p->mutex);
1472 
1473 	args->first_gws = 0;
1474 	return retval;
1475 
1476 out_unlock:
1477 	mutex_unlock(&p->mutex);
1478 	return retval;
1479 }
1480 
1481 static int kfd_ioctl_get_dmabuf_info(struct file *filep,
1482 		struct kfd_process *p, void *data)
1483 {
1484 	struct kfd_ioctl_get_dmabuf_info_args *args = data;
1485 	struct kfd_dev *dev = NULL;
1486 	struct amdgpu_device *dmabuf_adev;
1487 	void *metadata_buffer = NULL;
1488 	uint32_t flags;
1489 	unsigned int i;
1490 	int r;
1491 
1492 	/* Find a KFD GPU device that supports the get_dmabuf_info query */
1493 	for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
1494 		if (dev)
1495 			break;
1496 	if (!dev)
1497 		return -EINVAL;
1498 
1499 	if (args->metadata_ptr) {
1500 		metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL);
1501 		if (!metadata_buffer)
1502 			return -ENOMEM;
1503 	}
1504 
1505 	/* Get dmabuf info from KGD */
1506 	r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd,
1507 					  &dmabuf_adev, &args->size,
1508 					  metadata_buffer, args->metadata_size,
1509 					  &args->metadata_size, &flags);
1510 	if (r)
1511 		goto exit;
1512 
1513 	/* Reverse-lookup gpu_id from kgd pointer */
1514 	dev = kfd_device_by_adev(dmabuf_adev);
1515 	if (!dev) {
1516 		r = -EINVAL;
1517 		goto exit;
1518 	}
1519 	args->gpu_id = dev->id;
1520 	args->flags = flags;
1521 
1522 	/* Copy metadata buffer to user mode */
1523 	if (metadata_buffer) {
1524 		r = copy_to_user((void __user *)args->metadata_ptr,
1525 				 metadata_buffer, args->metadata_size);
1526 		if (r != 0)
1527 			r = -EFAULT;
1528 	}
1529 
1530 exit:
1531 	kfree(metadata_buffer);
1532 
1533 	return r;
1534 }
1535 
1536 static int kfd_ioctl_import_dmabuf(struct file *filep,
1537 				   struct kfd_process *p, void *data)
1538 {
1539 	struct kfd_ioctl_import_dmabuf_args *args = data;
1540 	struct kfd_process_device *pdd;
1541 	struct dma_buf *dmabuf;
1542 	int idr_handle;
1543 	uint64_t size;
1544 	void *mem;
1545 	int r;
1546 
1547 	dmabuf = dma_buf_get(args->dmabuf_fd);
1548 	if (IS_ERR(dmabuf))
1549 		return PTR_ERR(dmabuf);
1550 
1551 	mutex_lock(&p->mutex);
1552 	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
1553 	if (!pdd) {
1554 		r = -EINVAL;
1555 		goto err_unlock;
1556 	}
1557 
1558 	pdd = kfd_bind_process_to_device(pdd->dev, p);
1559 	if (IS_ERR(pdd)) {
1560 		r = PTR_ERR(pdd);
1561 		goto err_unlock;
1562 	}
1563 
1564 	r = amdgpu_amdkfd_gpuvm_import_dmabuf(pdd->dev->adev, dmabuf,
1565 					      args->va_addr, pdd->drm_priv,
1566 					      (struct kgd_mem **)&mem, &size,
1567 					      NULL);
1568 	if (r)
1569 		goto err_unlock;
1570 
1571 	idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
1572 	if (idr_handle < 0) {
1573 		r = -EFAULT;
1574 		goto err_free;
1575 	}
1576 
1577 	mutex_unlock(&p->mutex);
1578 	dma_buf_put(dmabuf);
1579 
1580 	args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
1581 
1582 	return 0;
1583 
1584 err_free:
1585 	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, (struct kgd_mem *)mem,
1586 					       pdd->drm_priv, NULL);
1587 err_unlock:
1588 	mutex_unlock(&p->mutex);
1589 	dma_buf_put(dmabuf);
1590 	return r;
1591 }
1592 
1593 static int kfd_ioctl_export_dmabuf(struct file *filep,
1594 				   struct kfd_process *p, void *data)
1595 {
1596 	struct kfd_ioctl_export_dmabuf_args *args = data;
1597 	struct kfd_process_device *pdd;
1598 	struct dma_buf *dmabuf;
1599 	struct kfd_dev *dev;
1600 	void *mem;
1601 	int ret = 0;
1602 
1603 	dev = kfd_device_by_id(GET_GPU_ID(args->handle));
1604 	if (!dev)
1605 		return -EINVAL;
1606 
1607 	mutex_lock(&p->mutex);
1608 
1609 	pdd = kfd_get_process_device_data(dev, p);
1610 	if (!pdd) {
1611 		ret = -EINVAL;
1612 		goto err_unlock;
1613 	}
1614 
1615 	mem = kfd_process_device_translate_handle(pdd,
1616 						GET_IDR_HANDLE(args->handle));
1617 	if (!mem) {
1618 		ret = -EINVAL;
1619 		goto err_unlock;
1620 	}
1621 
1622 	ret = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf);
1623 	mutex_unlock(&p->mutex);
1624 	if (ret)
1625 		goto err_out;
1626 
1627 	ret = dma_buf_fd(dmabuf, args->flags);
1628 	if (ret < 0) {
1629 		dma_buf_put(dmabuf);
1630 		goto err_out;
1631 	}
1632 	/* dma_buf_fd assigns the reference count to the fd, no need to
1633 	 * put the reference here.
1634 	 */
1635 	args->dmabuf_fd = ret;
1636 
1637 	return 0;
1638 
1639 err_unlock:
1640 	mutex_unlock(&p->mutex);
1641 err_out:
1642 	return ret;
1643 }
1644 
1645 /* Handle requests for watching SMI events */
1646 static int kfd_ioctl_smi_events(struct file *filep,
1647 				struct kfd_process *p, void *data)
1648 {
1649 	struct kfd_ioctl_smi_events_args *args = data;
1650 	struct kfd_process_device *pdd;
1651 
1652 	mutex_lock(&p->mutex);
1653 
1654 	pdd = kfd_process_device_data_by_id(p, args->gpuid);
1655 	mutex_unlock(&p->mutex);
1656 	if (!pdd)
1657 		return -EINVAL;
1658 
1659 	return kfd_smi_event_open(pdd->dev, &args->anon_fd);
1660 }
1661 
1662 #if IS_ENABLED(CONFIG_HSA_AMD_SVM)
1663 
1664 static int kfd_ioctl_set_xnack_mode(struct file *filep,
1665 				    struct kfd_process *p, void *data)
1666 {
1667 	struct kfd_ioctl_set_xnack_mode_args *args = data;
1668 	int r = 0;
1669 
1670 	mutex_lock(&p->mutex);
1671 	if (args->xnack_enabled >= 0) {
1672 		if (!list_empty(&p->pqm.queues)) {
1673 			pr_debug("Process has user queues running\n");
1674 			r = -EBUSY;
1675 			goto out_unlock;
1676 		}
1677 
1678 		if (p->xnack_enabled == args->xnack_enabled)
1679 			goto out_unlock;
1680 
1681 		if (args->xnack_enabled && !kfd_process_xnack_mode(p, true)) {
1682 			r = -EPERM;
1683 			goto out_unlock;
1684 		}
1685 
1686 		r = svm_range_switch_xnack_reserve_mem(p, args->xnack_enabled);
1687 	} else {
1688 		args->xnack_enabled = p->xnack_enabled;
1689 	}
1690 
1691 out_unlock:
1692 	mutex_unlock(&p->mutex);
1693 
1694 	return r;
1695 }
1696 
1697 static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
1698 {
1699 	struct kfd_ioctl_svm_args *args = data;
1700 	int r = 0;
1701 
1702 	pr_debug("start 0x%llx size 0x%llx op 0x%x nattr 0x%x\n",
1703 		 args->start_addr, args->size, args->op, args->nattr);
1704 
1705 	if ((args->start_addr & ~PAGE_MASK) || (args->size & ~PAGE_MASK))
1706 		return -EINVAL;
1707 	if (!args->start_addr || !args->size)
1708 		return -EINVAL;
1709 
1710 	r = svm_ioctl(p, args->op, args->start_addr, args->size, args->nattr,
1711 		      args->attrs);
1712 
1713 	return r;
1714 }
1715 #else
1716 static int kfd_ioctl_set_xnack_mode(struct file *filep,
1717 				    struct kfd_process *p, void *data)
1718 {
1719 	return -EPERM;
1720 }
1721 static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
1722 {
1723 	return -EPERM;
1724 }
1725 #endif
1726 
1727 static int criu_checkpoint_process(struct kfd_process *p,
1728 			     uint8_t __user *user_priv_data,
1729 			     uint64_t *priv_offset)
1730 {
1731 	struct kfd_criu_process_priv_data process_priv;
1732 	int ret;
1733 
1734 	memset(&process_priv, 0, sizeof(process_priv));
1735 
1736 	process_priv.version = KFD_CRIU_PRIV_VERSION;
1737 	/* For CR, we don't consider negative xnack mode which is used for
1738 	 * querying without changing it, here 0 simply means disabled and 1
1739 	 * means enabled so retry for finding a valid PTE.
1740 	 */
1741 	process_priv.xnack_mode = p->xnack_enabled ? 1 : 0;
1742 
1743 	ret = copy_to_user(user_priv_data + *priv_offset,
1744 				&process_priv, sizeof(process_priv));
1745 
1746 	if (ret) {
1747 		pr_err("Failed to copy process information to user\n");
1748 		ret = -EFAULT;
1749 	}
1750 
1751 	*priv_offset += sizeof(process_priv);
1752 	return ret;
1753 }
1754 
1755 static int criu_checkpoint_devices(struct kfd_process *p,
1756 			     uint32_t num_devices,
1757 			     uint8_t __user *user_addr,
1758 			     uint8_t __user *user_priv_data,
1759 			     uint64_t *priv_offset)
1760 {
1761 	struct kfd_criu_device_priv_data *device_priv = NULL;
1762 	struct kfd_criu_device_bucket *device_buckets = NULL;
1763 	int ret = 0, i;
1764 
1765 	device_buckets = kvzalloc(num_devices * sizeof(*device_buckets), GFP_KERNEL);
1766 	if (!device_buckets) {
1767 		ret = -ENOMEM;
1768 		goto exit;
1769 	}
1770 
1771 	device_priv = kvzalloc(num_devices * sizeof(*device_priv), GFP_KERNEL);
1772 	if (!device_priv) {
1773 		ret = -ENOMEM;
1774 		goto exit;
1775 	}
1776 
1777 	for (i = 0; i < num_devices; i++) {
1778 		struct kfd_process_device *pdd = p->pdds[i];
1779 
1780 		device_buckets[i].user_gpu_id = pdd->user_gpu_id;
1781 		device_buckets[i].actual_gpu_id = pdd->dev->id;
1782 
1783 		/*
1784 		 * priv_data does not contain useful information for now and is reserved for
1785 		 * future use, so we do not set its contents.
1786 		 */
1787 	}
1788 
1789 	ret = copy_to_user(user_addr, device_buckets, num_devices * sizeof(*device_buckets));
1790 	if (ret) {
1791 		pr_err("Failed to copy device information to user\n");
1792 		ret = -EFAULT;
1793 		goto exit;
1794 	}
1795 
1796 	ret = copy_to_user(user_priv_data + *priv_offset,
1797 			   device_priv,
1798 			   num_devices * sizeof(*device_priv));
1799 	if (ret) {
1800 		pr_err("Failed to copy device information to user\n");
1801 		ret = -EFAULT;
1802 	}
1803 	*priv_offset += num_devices * sizeof(*device_priv);
1804 
1805 exit:
1806 	kvfree(device_buckets);
1807 	kvfree(device_priv);
1808 	return ret;
1809 }
1810 
1811 static uint32_t get_process_num_bos(struct kfd_process *p)
1812 {
1813 	uint32_t num_of_bos = 0;
1814 	int i;
1815 
1816 	/* Run over all PDDs of the process */
1817 	for (i = 0; i < p->n_pdds; i++) {
1818 		struct kfd_process_device *pdd = p->pdds[i];
1819 		void *mem;
1820 		int id;
1821 
1822 		idr_for_each_entry(&pdd->alloc_idr, mem, id) {
1823 			struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
1824 
1825 			if ((uint64_t)kgd_mem->va > pdd->gpuvm_base)
1826 				num_of_bos++;
1827 		}
1828 	}
1829 	return num_of_bos;
1830 }
1831 
1832 static int criu_get_prime_handle(struct drm_gem_object *gobj, int flags,
1833 				      u32 *shared_fd)
1834 {
1835 	struct dma_buf *dmabuf;
1836 	int ret;
1837 
1838 	dmabuf = amdgpu_gem_prime_export(gobj, flags);
1839 	if (IS_ERR(dmabuf)) {
1840 		ret = PTR_ERR(dmabuf);
1841 		pr_err("dmabuf export failed for the BO\n");
1842 		return ret;
1843 	}
1844 
1845 	ret = dma_buf_fd(dmabuf, flags);
1846 	if (ret < 0) {
1847 		pr_err("dmabuf create fd failed, ret:%d\n", ret);
1848 		goto out_free_dmabuf;
1849 	}
1850 
1851 	*shared_fd = ret;
1852 	return 0;
1853 
1854 out_free_dmabuf:
1855 	dma_buf_put(dmabuf);
1856 	return ret;
1857 }
1858 
1859 static int criu_checkpoint_bos(struct kfd_process *p,
1860 			       uint32_t num_bos,
1861 			       uint8_t __user *user_bos,
1862 			       uint8_t __user *user_priv_data,
1863 			       uint64_t *priv_offset)
1864 {
1865 	struct kfd_criu_bo_bucket *bo_buckets;
1866 	struct kfd_criu_bo_priv_data *bo_privs;
1867 	int ret = 0, pdd_index, bo_index = 0, id;
1868 	void *mem;
1869 
1870 	bo_buckets = kvzalloc(num_bos * sizeof(*bo_buckets), GFP_KERNEL);
1871 	if (!bo_buckets)
1872 		return -ENOMEM;
1873 
1874 	bo_privs = kvzalloc(num_bos * sizeof(*bo_privs), GFP_KERNEL);
1875 	if (!bo_privs) {
1876 		ret = -ENOMEM;
1877 		goto exit;
1878 	}
1879 
1880 	for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
1881 		struct kfd_process_device *pdd = p->pdds[pdd_index];
1882 		struct amdgpu_bo *dumper_bo;
1883 		struct kgd_mem *kgd_mem;
1884 
1885 		idr_for_each_entry(&pdd->alloc_idr, mem, id) {
1886 			struct kfd_criu_bo_bucket *bo_bucket;
1887 			struct kfd_criu_bo_priv_data *bo_priv;
1888 			int i, dev_idx = 0;
1889 
1890 			if (!mem) {
1891 				ret = -ENOMEM;
1892 				goto exit;
1893 			}
1894 
1895 			kgd_mem = (struct kgd_mem *)mem;
1896 			dumper_bo = kgd_mem->bo;
1897 
1898 			if ((uint64_t)kgd_mem->va <= pdd->gpuvm_base)
1899 				continue;
1900 
1901 			bo_bucket = &bo_buckets[bo_index];
1902 			bo_priv = &bo_privs[bo_index];
1903 
1904 			bo_bucket->gpu_id = pdd->user_gpu_id;
1905 			bo_bucket->addr = (uint64_t)kgd_mem->va;
1906 			bo_bucket->size = amdgpu_bo_size(dumper_bo);
1907 			bo_bucket->alloc_flags = (uint32_t)kgd_mem->alloc_flags;
1908 			bo_priv->idr_handle = id;
1909 
1910 			if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1911 				ret = amdgpu_ttm_tt_get_userptr(&dumper_bo->tbo,
1912 								&bo_priv->user_addr);
1913 				if (ret) {
1914 					pr_err("Failed to obtain user address for user-pointer bo\n");
1915 					goto exit;
1916 				}
1917 			}
1918 			if (bo_bucket->alloc_flags
1919 			    & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
1920 				ret = criu_get_prime_handle(&dumper_bo->tbo.base,
1921 						bo_bucket->alloc_flags &
1922 						KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? DRM_RDWR : 0,
1923 						&bo_bucket->dmabuf_fd);
1924 				if (ret)
1925 					goto exit;
1926 			} else {
1927 				bo_bucket->dmabuf_fd = KFD_INVALID_FD;
1928 			}
1929 
1930 			if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)
1931 				bo_bucket->offset = KFD_MMAP_TYPE_DOORBELL |
1932 					KFD_MMAP_GPU_ID(pdd->dev->id);
1933 			else if (bo_bucket->alloc_flags &
1934 				KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
1935 				bo_bucket->offset = KFD_MMAP_TYPE_MMIO |
1936 					KFD_MMAP_GPU_ID(pdd->dev->id);
1937 			else
1938 				bo_bucket->offset = amdgpu_bo_mmap_offset(dumper_bo);
1939 
1940 			for (i = 0; i < p->n_pdds; i++) {
1941 				if (amdgpu_amdkfd_bo_mapped_to_dev(p->pdds[i]->dev->adev, kgd_mem))
1942 					bo_priv->mapped_gpuids[dev_idx++] = p->pdds[i]->user_gpu_id;
1943 			}
1944 
1945 			pr_debug("bo_size = 0x%llx, bo_addr = 0x%llx bo_offset = 0x%llx\n"
1946 					"gpu_id = 0x%x alloc_flags = 0x%x idr_handle = 0x%x",
1947 					bo_bucket->size,
1948 					bo_bucket->addr,
1949 					bo_bucket->offset,
1950 					bo_bucket->gpu_id,
1951 					bo_bucket->alloc_flags,
1952 					bo_priv->idr_handle);
1953 			bo_index++;
1954 		}
1955 	}
1956 
1957 	ret = copy_to_user(user_bos, bo_buckets, num_bos * sizeof(*bo_buckets));
1958 	if (ret) {
1959 		pr_err("Failed to copy BO information to user\n");
1960 		ret = -EFAULT;
1961 		goto exit;
1962 	}
1963 
1964 	ret = copy_to_user(user_priv_data + *priv_offset, bo_privs, num_bos * sizeof(*bo_privs));
1965 	if (ret) {
1966 		pr_err("Failed to copy BO priv information to user\n");
1967 		ret = -EFAULT;
1968 		goto exit;
1969 	}
1970 
1971 	*priv_offset += num_bos * sizeof(*bo_privs);
1972 
1973 exit:
1974 	while (ret && bo_index--) {
1975 		if (bo_buckets[bo_index].alloc_flags
1976 		    & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT))
1977 			close_fd(bo_buckets[bo_index].dmabuf_fd);
1978 	}
1979 
1980 	kvfree(bo_buckets);
1981 	kvfree(bo_privs);
1982 	return ret;
1983 }
1984 
1985 static int criu_get_process_object_info(struct kfd_process *p,
1986 					uint32_t *num_devices,
1987 					uint32_t *num_bos,
1988 					uint32_t *num_objects,
1989 					uint64_t *objs_priv_size)
1990 {
1991 	uint64_t queues_priv_data_size, svm_priv_data_size, priv_size;
1992 	uint32_t num_queues, num_events, num_svm_ranges;
1993 	int ret;
1994 
1995 	*num_devices = p->n_pdds;
1996 	*num_bos = get_process_num_bos(p);
1997 
1998 	ret = kfd_process_get_queue_info(p, &num_queues, &queues_priv_data_size);
1999 	if (ret)
2000 		return ret;
2001 
2002 	num_events = kfd_get_num_events(p);
2003 
2004 	ret = svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size);
2005 	if (ret)
2006 		return ret;
2007 
2008 	*num_objects = num_queues + num_events + num_svm_ranges;
2009 
2010 	if (objs_priv_size) {
2011 		priv_size = sizeof(struct kfd_criu_process_priv_data);
2012 		priv_size += *num_devices * sizeof(struct kfd_criu_device_priv_data);
2013 		priv_size += *num_bos * sizeof(struct kfd_criu_bo_priv_data);
2014 		priv_size += queues_priv_data_size;
2015 		priv_size += num_events * sizeof(struct kfd_criu_event_priv_data);
2016 		priv_size += svm_priv_data_size;
2017 		*objs_priv_size = priv_size;
2018 	}
2019 	return 0;
2020 }
2021 
2022 static int criu_checkpoint(struct file *filep,
2023 			   struct kfd_process *p,
2024 			   struct kfd_ioctl_criu_args *args)
2025 {
2026 	int ret;
2027 	uint32_t num_devices, num_bos, num_objects;
2028 	uint64_t priv_size, priv_offset = 0, bo_priv_offset;
2029 
2030 	if (!args->devices || !args->bos || !args->priv_data)
2031 		return -EINVAL;
2032 
2033 	mutex_lock(&p->mutex);
2034 
2035 	if (!p->n_pdds) {
2036 		pr_err("No pdd for given process\n");
2037 		ret = -ENODEV;
2038 		goto exit_unlock;
2039 	}
2040 
2041 	/* Confirm all process queues are evicted */
2042 	if (!p->queues_paused) {
2043 		pr_err("Cannot dump process when queues are not in evicted state\n");
2044 		/* CRIU plugin did not call op PROCESS_INFO before checkpointing */
2045 		ret = -EINVAL;
2046 		goto exit_unlock;
2047 	}
2048 
2049 	ret = criu_get_process_object_info(p, &num_devices, &num_bos, &num_objects, &priv_size);
2050 	if (ret)
2051 		goto exit_unlock;
2052 
2053 	if (num_devices != args->num_devices ||
2054 	    num_bos != args->num_bos ||
2055 	    num_objects != args->num_objects ||
2056 	    priv_size != args->priv_data_size) {
2057 
2058 		ret = -EINVAL;
2059 		goto exit_unlock;
2060 	}
2061 
2062 	/* each function will store private data inside priv_data and adjust priv_offset */
2063 	ret = criu_checkpoint_process(p, (uint8_t __user *)args->priv_data, &priv_offset);
2064 	if (ret)
2065 		goto exit_unlock;
2066 
2067 	ret = criu_checkpoint_devices(p, num_devices, (uint8_t __user *)args->devices,
2068 				(uint8_t __user *)args->priv_data, &priv_offset);
2069 	if (ret)
2070 		goto exit_unlock;
2071 
2072 	/* Leave room for BOs in the private data. They need to be restored
2073 	 * before events, but we checkpoint them last to simplify the error
2074 	 * handling.
2075 	 */
2076 	bo_priv_offset = priv_offset;
2077 	priv_offset += num_bos * sizeof(struct kfd_criu_bo_priv_data);
2078 
2079 	if (num_objects) {
2080 		ret = kfd_criu_checkpoint_queues(p, (uint8_t __user *)args->priv_data,
2081 						 &priv_offset);
2082 		if (ret)
2083 			goto exit_unlock;
2084 
2085 		ret = kfd_criu_checkpoint_events(p, (uint8_t __user *)args->priv_data,
2086 						 &priv_offset);
2087 		if (ret)
2088 			goto exit_unlock;
2089 
2090 		ret = kfd_criu_checkpoint_svm(p, (uint8_t __user *)args->priv_data, &priv_offset);
2091 		if (ret)
2092 			goto exit_unlock;
2093 	}
2094 
2095 	/* This must be the last thing in this function that can fail.
2096 	 * Otherwise we leak dmabuf file descriptors.
2097 	 */
2098 	ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos,
2099 			   (uint8_t __user *)args->priv_data, &bo_priv_offset);
2100 
2101 exit_unlock:
2102 	mutex_unlock(&p->mutex);
2103 	if (ret)
2104 		pr_err("Failed to dump CRIU ret:%d\n", ret);
2105 	else
2106 		pr_debug("CRIU dump ret:%d\n", ret);
2107 
2108 	return ret;
2109 }
2110 
2111 static int criu_restore_process(struct kfd_process *p,
2112 				struct kfd_ioctl_criu_args *args,
2113 				uint64_t *priv_offset,
2114 				uint64_t max_priv_data_size)
2115 {
2116 	int ret = 0;
2117 	struct kfd_criu_process_priv_data process_priv;
2118 
2119 	if (*priv_offset + sizeof(process_priv) > max_priv_data_size)
2120 		return -EINVAL;
2121 
2122 	ret = copy_from_user(&process_priv,
2123 				(void __user *)(args->priv_data + *priv_offset),
2124 				sizeof(process_priv));
2125 	if (ret) {
2126 		pr_err("Failed to copy process private information from user\n");
2127 		ret = -EFAULT;
2128 		goto exit;
2129 	}
2130 	*priv_offset += sizeof(process_priv);
2131 
2132 	if (process_priv.version != KFD_CRIU_PRIV_VERSION) {
2133 		pr_err("Invalid CRIU API version (checkpointed:%d current:%d)\n",
2134 			process_priv.version, KFD_CRIU_PRIV_VERSION);
2135 		return -EINVAL;
2136 	}
2137 
2138 	pr_debug("Setting XNACK mode\n");
2139 	if (process_priv.xnack_mode && !kfd_process_xnack_mode(p, true)) {
2140 		pr_err("xnack mode cannot be set\n");
2141 		ret = -EPERM;
2142 		goto exit;
2143 	} else {
2144 		pr_debug("set xnack mode: %d\n", process_priv.xnack_mode);
2145 		p->xnack_enabled = process_priv.xnack_mode;
2146 	}
2147 
2148 exit:
2149 	return ret;
2150 }
2151 
2152 static int criu_restore_devices(struct kfd_process *p,
2153 				struct kfd_ioctl_criu_args *args,
2154 				uint64_t *priv_offset,
2155 				uint64_t max_priv_data_size)
2156 {
2157 	struct kfd_criu_device_bucket *device_buckets;
2158 	struct kfd_criu_device_priv_data *device_privs;
2159 	int ret = 0;
2160 	uint32_t i;
2161 
2162 	if (args->num_devices != p->n_pdds)
2163 		return -EINVAL;
2164 
2165 	if (*priv_offset + (args->num_devices * sizeof(*device_privs)) > max_priv_data_size)
2166 		return -EINVAL;
2167 
2168 	device_buckets = kmalloc_array(args->num_devices, sizeof(*device_buckets), GFP_KERNEL);
2169 	if (!device_buckets)
2170 		return -ENOMEM;
2171 
2172 	ret = copy_from_user(device_buckets, (void __user *)args->devices,
2173 				args->num_devices * sizeof(*device_buckets));
2174 	if (ret) {
2175 		pr_err("Failed to copy devices buckets from user\n");
2176 		ret = -EFAULT;
2177 		goto exit;
2178 	}
2179 
2180 	for (i = 0; i < args->num_devices; i++) {
2181 		struct kfd_dev *dev;
2182 		struct kfd_process_device *pdd;
2183 		struct file *drm_file;
2184 
2185 		/* device private data is not currently used */
2186 
2187 		if (!device_buckets[i].user_gpu_id) {
2188 			pr_err("Invalid user gpu_id\n");
2189 			ret = -EINVAL;
2190 			goto exit;
2191 		}
2192 
2193 		dev = kfd_device_by_id(device_buckets[i].actual_gpu_id);
2194 		if (!dev) {
2195 			pr_err("Failed to find device with gpu_id = %x\n",
2196 				device_buckets[i].actual_gpu_id);
2197 			ret = -EINVAL;
2198 			goto exit;
2199 		}
2200 
2201 		pdd = kfd_get_process_device_data(dev, p);
2202 		if (!pdd) {
2203 			pr_err("Failed to get pdd for gpu_id = %x\n",
2204 					device_buckets[i].actual_gpu_id);
2205 			ret = -EINVAL;
2206 			goto exit;
2207 		}
2208 		pdd->user_gpu_id = device_buckets[i].user_gpu_id;
2209 
2210 		drm_file = fget(device_buckets[i].drm_fd);
2211 		if (!drm_file) {
2212 			pr_err("Invalid render node file descriptor sent from plugin (%d)\n",
2213 				device_buckets[i].drm_fd);
2214 			ret = -EINVAL;
2215 			goto exit;
2216 		}
2217 
2218 		if (pdd->drm_file) {
2219 			ret = -EINVAL;
2220 			goto exit;
2221 		}
2222 
2223 		/* create the vm using render nodes for kfd pdd */
2224 		if (kfd_process_device_init_vm(pdd, drm_file)) {
2225 			pr_err("could not init vm for given pdd\n");
2226 			/* On success, the PDD keeps the drm_file reference */
2227 			fput(drm_file);
2228 			ret = -EINVAL;
2229 			goto exit;
2230 		}
2231 		/*
2232 		 * pdd now already has the vm bound to render node so below api won't create a new
2233 		 * exclusive kfd mapping but use existing one with renderDXXX but is still needed
2234 		 * for iommu v2 binding  and runtime pm.
2235 		 */
2236 		pdd = kfd_bind_process_to_device(dev, p);
2237 		if (IS_ERR(pdd)) {
2238 			ret = PTR_ERR(pdd);
2239 			goto exit;
2240 		}
2241 
2242 		if (!pdd->doorbell_index &&
2243 		    kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index) < 0) {
2244 			ret = -ENOMEM;
2245 			goto exit;
2246 		}
2247 	}
2248 
2249 	/*
2250 	 * We are not copying device private data from user as we are not using the data for now,
2251 	 * but we still adjust for its private data.
2252 	 */
2253 	*priv_offset += args->num_devices * sizeof(*device_privs);
2254 
2255 exit:
2256 	kfree(device_buckets);
2257 	return ret;
2258 }
2259 
2260 static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
2261 				      struct kfd_criu_bo_bucket *bo_bucket,
2262 				      struct kfd_criu_bo_priv_data *bo_priv,
2263 				      struct kgd_mem **kgd_mem)
2264 {
2265 	int idr_handle;
2266 	int ret;
2267 	const bool criu_resume = true;
2268 	u64 offset;
2269 
2270 	if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
2271 		if (bo_bucket->size != kfd_doorbell_process_slice(pdd->dev))
2272 			return -EINVAL;
2273 
2274 		offset = kfd_get_process_doorbells(pdd);
2275 		if (!offset)
2276 			return -ENOMEM;
2277 	} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
2278 		/* MMIO BOs need remapped bus address */
2279 		if (bo_bucket->size != PAGE_SIZE) {
2280 			pr_err("Invalid page size\n");
2281 			return -EINVAL;
2282 		}
2283 		offset = pdd->dev->adev->rmmio_remap.bus_addr;
2284 		if (!offset) {
2285 			pr_err("amdgpu_amdkfd_get_mmio_remap_phys_addr failed\n");
2286 			return -ENOMEM;
2287 		}
2288 	} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
2289 		offset = bo_priv->user_addr;
2290 	}
2291 	/* Create the BO */
2292 	ret = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(pdd->dev->adev, bo_bucket->addr,
2293 						      bo_bucket->size, pdd->drm_priv, kgd_mem,
2294 						      &offset, bo_bucket->alloc_flags, criu_resume);
2295 	if (ret) {
2296 		pr_err("Could not create the BO\n");
2297 		return ret;
2298 	}
2299 	pr_debug("New BO created: size:0x%llx addr:0x%llx offset:0x%llx\n",
2300 		 bo_bucket->size, bo_bucket->addr, offset);
2301 
2302 	/* Restore previous IDR handle */
2303 	pr_debug("Restoring old IDR handle for the BO");
2304 	idr_handle = idr_alloc(&pdd->alloc_idr, *kgd_mem, bo_priv->idr_handle,
2305 			       bo_priv->idr_handle + 1, GFP_KERNEL);
2306 
2307 	if (idr_handle < 0) {
2308 		pr_err("Could not allocate idr\n");
2309 		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, *kgd_mem, pdd->drm_priv,
2310 						       NULL);
2311 		return -ENOMEM;
2312 	}
2313 
2314 	if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)
2315 		bo_bucket->restored_offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(pdd->dev->id);
2316 	if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
2317 		bo_bucket->restored_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(pdd->dev->id);
2318 	} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
2319 		bo_bucket->restored_offset = offset;
2320 	} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
2321 		bo_bucket->restored_offset = offset;
2322 		/* Update the VRAM usage count */
2323 		WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + bo_bucket->size);
2324 	}
2325 	return 0;
2326 }
2327 
2328 static int criu_restore_bo(struct kfd_process *p,
2329 			   struct kfd_criu_bo_bucket *bo_bucket,
2330 			   struct kfd_criu_bo_priv_data *bo_priv)
2331 {
2332 	struct kfd_process_device *pdd;
2333 	struct kgd_mem *kgd_mem;
2334 	int ret;
2335 	int j;
2336 
2337 	pr_debug("Restoring BO size:0x%llx addr:0x%llx gpu_id:0x%x flags:0x%x idr_handle:0x%x\n",
2338 		 bo_bucket->size, bo_bucket->addr, bo_bucket->gpu_id, bo_bucket->alloc_flags,
2339 		 bo_priv->idr_handle);
2340 
2341 	pdd = kfd_process_device_data_by_id(p, bo_bucket->gpu_id);
2342 	if (!pdd) {
2343 		pr_err("Failed to get pdd\n");
2344 		return -ENODEV;
2345 	}
2346 
2347 	ret = criu_restore_memory_of_gpu(pdd, bo_bucket, bo_priv, &kgd_mem);
2348 	if (ret)
2349 		return ret;
2350 
2351 	/* now map these BOs to GPU/s */
2352 	for (j = 0; j < p->n_pdds; j++) {
2353 		struct kfd_dev *peer;
2354 		struct kfd_process_device *peer_pdd;
2355 
2356 		if (!bo_priv->mapped_gpuids[j])
2357 			break;
2358 
2359 		peer_pdd = kfd_process_device_data_by_id(p, bo_priv->mapped_gpuids[j]);
2360 		if (!peer_pdd)
2361 			return -EINVAL;
2362 
2363 		peer = peer_pdd->dev;
2364 
2365 		peer_pdd = kfd_bind_process_to_device(peer, p);
2366 		if (IS_ERR(peer_pdd))
2367 			return PTR_ERR(peer_pdd);
2368 
2369 		ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev, kgd_mem,
2370 							    peer_pdd->drm_priv);
2371 		if (ret) {
2372 			pr_err("Failed to map to gpu %d/%d\n", j, p->n_pdds);
2373 			return ret;
2374 		}
2375 	}
2376 
2377 	pr_debug("map memory was successful for the BO\n");
2378 	/* create the dmabuf object and export the bo */
2379 	if (bo_bucket->alloc_flags
2380 	    & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
2381 		ret = criu_get_prime_handle(&kgd_mem->bo->tbo.base, DRM_RDWR,
2382 					    &bo_bucket->dmabuf_fd);
2383 		if (ret)
2384 			return ret;
2385 	} else {
2386 		bo_bucket->dmabuf_fd = KFD_INVALID_FD;
2387 	}
2388 
2389 	return 0;
2390 }
2391 
2392 static int criu_restore_bos(struct kfd_process *p,
2393 			    struct kfd_ioctl_criu_args *args,
2394 			    uint64_t *priv_offset,
2395 			    uint64_t max_priv_data_size)
2396 {
2397 	struct kfd_criu_bo_bucket *bo_buckets = NULL;
2398 	struct kfd_criu_bo_priv_data *bo_privs = NULL;
2399 	int ret = 0;
2400 	uint32_t i = 0;
2401 
2402 	if (*priv_offset + (args->num_bos * sizeof(*bo_privs)) > max_priv_data_size)
2403 		return -EINVAL;
2404 
2405 	/* Prevent MMU notifications until stage-4 IOCTL (CRIU_RESUME) is received */
2406 	amdgpu_amdkfd_block_mmu_notifications(p->kgd_process_info);
2407 
2408 	bo_buckets = kvmalloc_array(args->num_bos, sizeof(*bo_buckets), GFP_KERNEL);
2409 	if (!bo_buckets)
2410 		return -ENOMEM;
2411 
2412 	ret = copy_from_user(bo_buckets, (void __user *)args->bos,
2413 			     args->num_bos * sizeof(*bo_buckets));
2414 	if (ret) {
2415 		pr_err("Failed to copy BOs information from user\n");
2416 		ret = -EFAULT;
2417 		goto exit;
2418 	}
2419 
2420 	bo_privs = kvmalloc_array(args->num_bos, sizeof(*bo_privs), GFP_KERNEL);
2421 	if (!bo_privs) {
2422 		ret = -ENOMEM;
2423 		goto exit;
2424 	}
2425 
2426 	ret = copy_from_user(bo_privs, (void __user *)args->priv_data + *priv_offset,
2427 			     args->num_bos * sizeof(*bo_privs));
2428 	if (ret) {
2429 		pr_err("Failed to copy BOs information from user\n");
2430 		ret = -EFAULT;
2431 		goto exit;
2432 	}
2433 	*priv_offset += args->num_bos * sizeof(*bo_privs);
2434 
2435 	/* Create and map new BOs */
2436 	for (; i < args->num_bos; i++) {
2437 		ret = criu_restore_bo(p, &bo_buckets[i], &bo_privs[i]);
2438 		if (ret) {
2439 			pr_debug("Failed to restore BO[%d] ret%d\n", i, ret);
2440 			goto exit;
2441 		}
2442 	} /* done */
2443 
2444 	/* Copy only the buckets back so user can read bo_buckets[N].restored_offset */
2445 	ret = copy_to_user((void __user *)args->bos,
2446 				bo_buckets,
2447 				(args->num_bos * sizeof(*bo_buckets)));
2448 	if (ret)
2449 		ret = -EFAULT;
2450 
2451 exit:
2452 	while (ret && i--) {
2453 		if (bo_buckets[i].alloc_flags
2454 		   & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT))
2455 			close_fd(bo_buckets[i].dmabuf_fd);
2456 	}
2457 	kvfree(bo_buckets);
2458 	kvfree(bo_privs);
2459 	return ret;
2460 }
2461 
2462 static int criu_restore_objects(struct file *filep,
2463 				struct kfd_process *p,
2464 				struct kfd_ioctl_criu_args *args,
2465 				uint64_t *priv_offset,
2466 				uint64_t max_priv_data_size)
2467 {
2468 	int ret = 0;
2469 	uint32_t i;
2470 
2471 	BUILD_BUG_ON(offsetof(struct kfd_criu_queue_priv_data, object_type));
2472 	BUILD_BUG_ON(offsetof(struct kfd_criu_event_priv_data, object_type));
2473 	BUILD_BUG_ON(offsetof(struct kfd_criu_svm_range_priv_data, object_type));
2474 
2475 	for (i = 0; i < args->num_objects; i++) {
2476 		uint32_t object_type;
2477 
2478 		if (*priv_offset + sizeof(object_type) > max_priv_data_size) {
2479 			pr_err("Invalid private data size\n");
2480 			return -EINVAL;
2481 		}
2482 
2483 		ret = get_user(object_type, (uint32_t __user *)(args->priv_data + *priv_offset));
2484 		if (ret) {
2485 			pr_err("Failed to copy private information from user\n");
2486 			goto exit;
2487 		}
2488 
2489 		switch (object_type) {
2490 		case KFD_CRIU_OBJECT_TYPE_QUEUE:
2491 			ret = kfd_criu_restore_queue(p, (uint8_t __user *)args->priv_data,
2492 						     priv_offset, max_priv_data_size);
2493 			if (ret)
2494 				goto exit;
2495 			break;
2496 		case KFD_CRIU_OBJECT_TYPE_EVENT:
2497 			ret = kfd_criu_restore_event(filep, p, (uint8_t __user *)args->priv_data,
2498 						     priv_offset, max_priv_data_size);
2499 			if (ret)
2500 				goto exit;
2501 			break;
2502 		case KFD_CRIU_OBJECT_TYPE_SVM_RANGE:
2503 			ret = kfd_criu_restore_svm(p, (uint8_t __user *)args->priv_data,
2504 						     priv_offset, max_priv_data_size);
2505 			if (ret)
2506 				goto exit;
2507 			break;
2508 		default:
2509 			pr_err("Invalid object type:%u at index:%d\n", object_type, i);
2510 			ret = -EINVAL;
2511 			goto exit;
2512 		}
2513 	}
2514 exit:
2515 	return ret;
2516 }
2517 
2518 static int criu_restore(struct file *filep,
2519 			struct kfd_process *p,
2520 			struct kfd_ioctl_criu_args *args)
2521 {
2522 	uint64_t priv_offset = 0;
2523 	int ret = 0;
2524 
2525 	pr_debug("CRIU restore (num_devices:%u num_bos:%u num_objects:%u priv_data_size:%llu)\n",
2526 		 args->num_devices, args->num_bos, args->num_objects, args->priv_data_size);
2527 
2528 	if (!args->bos || !args->devices || !args->priv_data || !args->priv_data_size ||
2529 	    !args->num_devices || !args->num_bos)
2530 		return -EINVAL;
2531 
2532 	mutex_lock(&p->mutex);
2533 
2534 	/*
2535 	 * Set the process to evicted state to avoid running any new queues before all the memory
2536 	 * mappings are ready.
2537 	 */
2538 	ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_CRIU_RESTORE);
2539 	if (ret)
2540 		goto exit_unlock;
2541 
2542 	/* Each function will adjust priv_offset based on how many bytes they consumed */
2543 	ret = criu_restore_process(p, args, &priv_offset, args->priv_data_size);
2544 	if (ret)
2545 		goto exit_unlock;
2546 
2547 	ret = criu_restore_devices(p, args, &priv_offset, args->priv_data_size);
2548 	if (ret)
2549 		goto exit_unlock;
2550 
2551 	ret = criu_restore_bos(p, args, &priv_offset, args->priv_data_size);
2552 	if (ret)
2553 		goto exit_unlock;
2554 
2555 	ret = criu_restore_objects(filep, p, args, &priv_offset, args->priv_data_size);
2556 	if (ret)
2557 		goto exit_unlock;
2558 
2559 	if (priv_offset != args->priv_data_size) {
2560 		pr_err("Invalid private data size\n");
2561 		ret = -EINVAL;
2562 	}
2563 
2564 exit_unlock:
2565 	mutex_unlock(&p->mutex);
2566 	if (ret)
2567 		pr_err("Failed to restore CRIU ret:%d\n", ret);
2568 	else
2569 		pr_debug("CRIU restore successful\n");
2570 
2571 	return ret;
2572 }
2573 
2574 static int criu_unpause(struct file *filep,
2575 			struct kfd_process *p,
2576 			struct kfd_ioctl_criu_args *args)
2577 {
2578 	int ret;
2579 
2580 	mutex_lock(&p->mutex);
2581 
2582 	if (!p->queues_paused) {
2583 		mutex_unlock(&p->mutex);
2584 		return -EINVAL;
2585 	}
2586 
2587 	ret = kfd_process_restore_queues(p);
2588 	if (ret)
2589 		pr_err("Failed to unpause queues ret:%d\n", ret);
2590 	else
2591 		p->queues_paused = false;
2592 
2593 	mutex_unlock(&p->mutex);
2594 
2595 	return ret;
2596 }
2597 
2598 static int criu_resume(struct file *filep,
2599 			struct kfd_process *p,
2600 			struct kfd_ioctl_criu_args *args)
2601 {
2602 	struct kfd_process *target = NULL;
2603 	struct pid *pid = NULL;
2604 	int ret = 0;
2605 
2606 	pr_debug("Inside %s, target pid for criu restore: %d\n", __func__,
2607 		 args->pid);
2608 
2609 	pid = find_get_pid(args->pid);
2610 	if (!pid) {
2611 		pr_err("Cannot find pid info for %i\n", args->pid);
2612 		return -ESRCH;
2613 	}
2614 
2615 	pr_debug("calling kfd_lookup_process_by_pid\n");
2616 	target = kfd_lookup_process_by_pid(pid);
2617 
2618 	put_pid(pid);
2619 
2620 	if (!target) {
2621 		pr_debug("Cannot find process info for %i\n", args->pid);
2622 		return -ESRCH;
2623 	}
2624 
2625 	mutex_lock(&target->mutex);
2626 	ret = kfd_criu_resume_svm(target);
2627 	if (ret) {
2628 		pr_err("kfd_criu_resume_svm failed for %i\n", args->pid);
2629 		goto exit;
2630 	}
2631 
2632 	ret =  amdgpu_amdkfd_criu_resume(target->kgd_process_info);
2633 	if (ret)
2634 		pr_err("amdgpu_amdkfd_criu_resume failed for %i\n", args->pid);
2635 
2636 exit:
2637 	mutex_unlock(&target->mutex);
2638 
2639 	kfd_unref_process(target);
2640 	return ret;
2641 }
2642 
2643 static int criu_process_info(struct file *filep,
2644 				struct kfd_process *p,
2645 				struct kfd_ioctl_criu_args *args)
2646 {
2647 	int ret = 0;
2648 
2649 	mutex_lock(&p->mutex);
2650 
2651 	if (!p->n_pdds) {
2652 		pr_err("No pdd for given process\n");
2653 		ret = -ENODEV;
2654 		goto err_unlock;
2655 	}
2656 
2657 	ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_CRIU_CHECKPOINT);
2658 	if (ret)
2659 		goto err_unlock;
2660 
2661 	p->queues_paused = true;
2662 
2663 	args->pid = task_pid_nr_ns(p->lead_thread,
2664 					task_active_pid_ns(p->lead_thread));
2665 
2666 	ret = criu_get_process_object_info(p, &args->num_devices, &args->num_bos,
2667 					   &args->num_objects, &args->priv_data_size);
2668 	if (ret)
2669 		goto err_unlock;
2670 
2671 	dev_dbg(kfd_device, "Num of devices:%u bos:%u objects:%u priv_data_size:%lld\n",
2672 				args->num_devices, args->num_bos, args->num_objects,
2673 				args->priv_data_size);
2674 
2675 err_unlock:
2676 	if (ret) {
2677 		kfd_process_restore_queues(p);
2678 		p->queues_paused = false;
2679 	}
2680 	mutex_unlock(&p->mutex);
2681 	return ret;
2682 }
2683 
2684 static int kfd_ioctl_criu(struct file *filep, struct kfd_process *p, void *data)
2685 {
2686 	struct kfd_ioctl_criu_args *args = data;
2687 	int ret;
2688 
2689 	dev_dbg(kfd_device, "CRIU operation: %d\n", args->op);
2690 	switch (args->op) {
2691 	case KFD_CRIU_OP_PROCESS_INFO:
2692 		ret = criu_process_info(filep, p, args);
2693 		break;
2694 	case KFD_CRIU_OP_CHECKPOINT:
2695 		ret = criu_checkpoint(filep, p, args);
2696 		break;
2697 	case KFD_CRIU_OP_UNPAUSE:
2698 		ret = criu_unpause(filep, p, args);
2699 		break;
2700 	case KFD_CRIU_OP_RESTORE:
2701 		ret = criu_restore(filep, p, args);
2702 		break;
2703 	case KFD_CRIU_OP_RESUME:
2704 		ret = criu_resume(filep, p, args);
2705 		break;
2706 	default:
2707 		dev_dbg(kfd_device, "Unsupported CRIU operation:%d\n", args->op);
2708 		ret = -EINVAL;
2709 		break;
2710 	}
2711 
2712 	if (ret)
2713 		dev_dbg(kfd_device, "CRIU operation:%d err:%d\n", args->op, ret);
2714 
2715 	return ret;
2716 }
2717 
2718 #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
2719 	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
2720 			    .cmd_drv = 0, .name = #ioctl}
2721 
2722 /** Ioctl table */
2723 static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
2724 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
2725 			kfd_ioctl_get_version, 0),
2726 
2727 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
2728 			kfd_ioctl_create_queue, 0),
2729 
2730 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
2731 			kfd_ioctl_destroy_queue, 0),
2732 
2733 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
2734 			kfd_ioctl_set_memory_policy, 0),
2735 
2736 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
2737 			kfd_ioctl_get_clock_counters, 0),
2738 
2739 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
2740 			kfd_ioctl_get_process_apertures, 0),
2741 
2742 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
2743 			kfd_ioctl_update_queue, 0),
2744 
2745 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_EVENT,
2746 			kfd_ioctl_create_event, 0),
2747 
2748 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_EVENT,
2749 			kfd_ioctl_destroy_event, 0),
2750 
2751 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_EVENT,
2752 			kfd_ioctl_set_event, 0),
2753 
2754 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_RESET_EVENT,
2755 			kfd_ioctl_reset_event, 0),
2756 
2757 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_WAIT_EVENTS,
2758 			kfd_ioctl_wait_events, 0),
2759 
2760 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER_DEPRECATED,
2761 			kfd_ioctl_dbg_register, 0),
2762 
2763 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED,
2764 			kfd_ioctl_dbg_unregister, 0),
2765 
2766 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED,
2767 			kfd_ioctl_dbg_address_watch, 0),
2768 
2769 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED,
2770 			kfd_ioctl_dbg_wave_control, 0),
2771 
2772 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA,
2773 			kfd_ioctl_set_scratch_backing_va, 0),
2774 
2775 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
2776 			kfd_ioctl_get_tile_config, 0),
2777 
2778 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER,
2779 			kfd_ioctl_set_trap_handler, 0),
2780 
2781 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW,
2782 			kfd_ioctl_get_process_apertures_new, 0),
2783 
2784 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM,
2785 			kfd_ioctl_acquire_vm, 0),
2786 
2787 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_GPU,
2788 			kfd_ioctl_alloc_memory_of_gpu, 0),
2789 
2790 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_FREE_MEMORY_OF_GPU,
2791 			kfd_ioctl_free_memory_of_gpu, 0),
2792 
2793 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_MAP_MEMORY_TO_GPU,
2794 			kfd_ioctl_map_memory_to_gpu, 0),
2795 
2796 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU,
2797 			kfd_ioctl_unmap_memory_from_gpu, 0),
2798 
2799 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK,
2800 			kfd_ioctl_set_cu_mask, 0),
2801 
2802 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
2803 			kfd_ioctl_get_queue_wave_state, 0),
2804 
2805 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO,
2806 				kfd_ioctl_get_dmabuf_info, 0),
2807 
2808 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
2809 				kfd_ioctl_import_dmabuf, 0),
2810 
2811 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_QUEUE_GWS,
2812 			kfd_ioctl_alloc_queue_gws, 0),
2813 
2814 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SMI_EVENTS,
2815 			kfd_ioctl_smi_events, 0),
2816 
2817 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SVM, kfd_ioctl_svm, 0),
2818 
2819 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_XNACK_MODE,
2820 			kfd_ioctl_set_xnack_mode, 0),
2821 
2822 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CRIU_OP,
2823 			kfd_ioctl_criu, KFD_IOC_FLAG_CHECKPOINT_RESTORE),
2824 
2825 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_AVAILABLE_MEMORY,
2826 			kfd_ioctl_get_available_memory, 0),
2827 
2828 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_EXPORT_DMABUF,
2829 				kfd_ioctl_export_dmabuf, 0),
2830 };
2831 
2832 #define AMDKFD_CORE_IOCTL_COUNT	ARRAY_SIZE(amdkfd_ioctls)
2833 
2834 static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
2835 {
2836 	struct kfd_process *process;
2837 	amdkfd_ioctl_t *func;
2838 	const struct amdkfd_ioctl_desc *ioctl = NULL;
2839 	unsigned int nr = _IOC_NR(cmd);
2840 	char stack_kdata[128];
2841 	char *kdata = NULL;
2842 	unsigned int usize, asize;
2843 	int retcode = -EINVAL;
2844 	bool ptrace_attached = false;
2845 
2846 	if (nr >= AMDKFD_CORE_IOCTL_COUNT)
2847 		goto err_i1;
2848 
2849 	if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
2850 		u32 amdkfd_size;
2851 
2852 		ioctl = &amdkfd_ioctls[nr];
2853 
2854 		amdkfd_size = _IOC_SIZE(ioctl->cmd);
2855 		usize = asize = _IOC_SIZE(cmd);
2856 		if (amdkfd_size > asize)
2857 			asize = amdkfd_size;
2858 
2859 		cmd = ioctl->cmd;
2860 	} else
2861 		goto err_i1;
2862 
2863 	dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
2864 
2865 	/* Get the process struct from the filep. Only the process
2866 	 * that opened /dev/kfd can use the file descriptor. Child
2867 	 * processes need to create their own KFD device context.
2868 	 */
2869 	process = filep->private_data;
2870 
2871 	rcu_read_lock();
2872 	if ((ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE) &&
2873 	    ptrace_parent(process->lead_thread) == current)
2874 		ptrace_attached = true;
2875 	rcu_read_unlock();
2876 
2877 	if (process->lead_thread != current->group_leader
2878 	    && !ptrace_attached) {
2879 		dev_dbg(kfd_device, "Using KFD FD in wrong process\n");
2880 		retcode = -EBADF;
2881 		goto err_i1;
2882 	}
2883 
2884 	/* Do not trust userspace, use our own definition */
2885 	func = ioctl->func;
2886 
2887 	if (unlikely(!func)) {
2888 		dev_dbg(kfd_device, "no function\n");
2889 		retcode = -EINVAL;
2890 		goto err_i1;
2891 	}
2892 
2893 	/*
2894 	 * Versions of docker shipped in Ubuntu 18.xx and 20.xx do not support
2895 	 * CAP_CHECKPOINT_RESTORE, so we also allow access if CAP_SYS_ADMIN as CAP_SYS_ADMIN is a
2896 	 * more priviledged access.
2897 	 */
2898 	if (unlikely(ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE)) {
2899 		if (!capable(CAP_CHECKPOINT_RESTORE) &&
2900 						!capable(CAP_SYS_ADMIN)) {
2901 			retcode = -EACCES;
2902 			goto err_i1;
2903 		}
2904 	}
2905 
2906 	if (cmd & (IOC_IN | IOC_OUT)) {
2907 		if (asize <= sizeof(stack_kdata)) {
2908 			kdata = stack_kdata;
2909 		} else {
2910 			kdata = kmalloc(asize, GFP_KERNEL);
2911 			if (!kdata) {
2912 				retcode = -ENOMEM;
2913 				goto err_i1;
2914 			}
2915 		}
2916 		if (asize > usize)
2917 			memset(kdata + usize, 0, asize - usize);
2918 	}
2919 
2920 	if (cmd & IOC_IN) {
2921 		if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
2922 			retcode = -EFAULT;
2923 			goto err_i1;
2924 		}
2925 	} else if (cmd & IOC_OUT) {
2926 		memset(kdata, 0, usize);
2927 	}
2928 
2929 	retcode = func(filep, process, kdata);
2930 
2931 	if (cmd & IOC_OUT)
2932 		if (copy_to_user((void __user *)arg, kdata, usize) != 0)
2933 			retcode = -EFAULT;
2934 
2935 err_i1:
2936 	if (!ioctl)
2937 		dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
2938 			  task_pid_nr(current), cmd, nr);
2939 
2940 	if (kdata != stack_kdata)
2941 		kfree(kdata);
2942 
2943 	if (retcode)
2944 		dev_dbg(kfd_device, "ioctl cmd (#0x%x), arg 0x%lx, ret = %d\n",
2945 				nr, arg, retcode);
2946 
2947 	return retcode;
2948 }
2949 
2950 static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
2951 		      struct vm_area_struct *vma)
2952 {
2953 	phys_addr_t address;
2954 
2955 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2956 		return -EINVAL;
2957 
2958 	address = dev->adev->rmmio_remap.bus_addr;
2959 
2960 	vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
2961 				VM_DONTDUMP | VM_PFNMAP);
2962 
2963 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2964 
2965 	pr_debug("pasid 0x%x mapping mmio page\n"
2966 		 "     target user address == 0x%08llX\n"
2967 		 "     physical address    == 0x%08llX\n"
2968 		 "     vm_flags            == 0x%04lX\n"
2969 		 "     size                == 0x%04lX\n",
2970 		 process->pasid, (unsigned long long) vma->vm_start,
2971 		 address, vma->vm_flags, PAGE_SIZE);
2972 
2973 	return io_remap_pfn_range(vma,
2974 				vma->vm_start,
2975 				address >> PAGE_SHIFT,
2976 				PAGE_SIZE,
2977 				vma->vm_page_prot);
2978 }
2979 
2980 
2981 static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
2982 {
2983 	struct kfd_process *process;
2984 	struct kfd_dev *dev = NULL;
2985 	unsigned long mmap_offset;
2986 	unsigned int gpu_id;
2987 
2988 	process = kfd_get_process(current);
2989 	if (IS_ERR(process))
2990 		return PTR_ERR(process);
2991 
2992 	mmap_offset = vma->vm_pgoff << PAGE_SHIFT;
2993 	gpu_id = KFD_MMAP_GET_GPU_ID(mmap_offset);
2994 	if (gpu_id)
2995 		dev = kfd_device_by_id(gpu_id);
2996 
2997 	switch (mmap_offset & KFD_MMAP_TYPE_MASK) {
2998 	case KFD_MMAP_TYPE_DOORBELL:
2999 		if (!dev)
3000 			return -ENODEV;
3001 		return kfd_doorbell_mmap(dev, process, vma);
3002 
3003 	case KFD_MMAP_TYPE_EVENTS:
3004 		return kfd_event_mmap(process, vma);
3005 
3006 	case KFD_MMAP_TYPE_RESERVED_MEM:
3007 		if (!dev)
3008 			return -ENODEV;
3009 		return kfd_reserved_mem_mmap(dev, process, vma);
3010 	case KFD_MMAP_TYPE_MMIO:
3011 		if (!dev)
3012 			return -ENODEV;
3013 		return kfd_mmio_mmap(dev, process, vma);
3014 	}
3015 
3016 	return -EFAULT;
3017 }
3018