// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/list.h>
#include "kfd_device_queue_manager.h"
#include "kfd_priv.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"

static inline struct process_queue_node *get_queue_by_qid(
			struct process_queue_manager *pqm, unsigned int qid)
{
	struct process_queue_node *pqn;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if ((pqn->q && pqn->q->properties.queue_id == qid) ||
		    (pqn->kq && pqn->kq->queue->properties.queue_id == qid))
			return pqn;
	}

	return NULL;
}

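/*
 * Reserve a specific queue slot, e.g. when restoring a queue with a known
 * qid from a CRIU checkpoint. Fails if the slot is already in use.
 */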
static int assign_queue_slot_by_qid(struct process_queue_manager *pqm,
				    unsigned int qid)
{
	if (qid >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
		return -EINVAL;

	if (__test_and_set_bit(qid, pqm->queue_slot_bitmap)) {
		pr_err("Cannot create new queue because requested qid(%u) is in use\n", qid);
		return -ENOSPC;
	}

	return 0;
}

static int find_available_queue_slot(struct process_queue_manager *pqm,
					unsigned int *qid)
{
	unsigned long found;

	found = find_first_zero_bit(pqm->queue_slot_bitmap,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	pr_debug("The new slot id %lu\n", found);

	if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
		pr_info("Cannot open more queues for process with pasid 0x%x\n",
				pqm->process->pasid);
		return -ENOMEM;
	}

	set_bit(found, pqm->queue_slot_bitmap);
	*qid = found;

	return 0;
}

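/*
 * Remove all of a process's queues from one device. Idempotent: the
 * already_dequeued flag makes repeated calls harmless.
 */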
void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
{
	struct kfd_node *dev = pdd->dev;

	if (pdd->already_dequeued)
		return;

	dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
	pdd->already_dequeued = true;
}

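/**
 * pqm_set_gws - Attach or detach a GWS allocation to/from a user queue
 * @pqm: the process queue manager
 * @qid: id of the queue to modify
 * @gws: GWS buffer object to attach, or NULL to detach
 *
 * Only one queue per process may have GWS assigned at a time. On success
 * the queue is updated through the DQM so the change takes effect in the
 * scheduler.
 *
 * Return: 0 on success, negative errno on failure.
 */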
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
			void *gws)
{
	struct kfd_node *dev = NULL;
	struct process_queue_node *pqn;
	struct kfd_process_device *pdd;
	struct kgd_mem *mem = NULL;
	int ret;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_err("Queue id does not match any known queue\n");
		return -EINVAL;
	}

	if (pqn->q)
		dev = pqn->q->device;
	if (WARN_ON(!dev))
		return -ENODEV;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -EINVAL;
	}

	/* Only one queue per process may have GWS assigned */
	if (gws && pdd->qpd.num_gws)
		return -EBUSY;

	if (!gws && pdd->qpd.num_gws == 0)
		return -EINVAL;

	if (gws)
		ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info,
			gws, &mem);
	else
		ret = amdgpu_amdkfd_remove_gws_from_process(pdd->process->kgd_process_info,
			pqn->q->gws);
	if (unlikely(ret))
		return ret;

	pqn->q->gws = mem;
	pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0;

	return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q, NULL);
}

void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		kfd_process_dequeue_from_device(p->pdds[i]);
}

int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
{
	INIT_LIST_HEAD(&pqm->queues);
	pqm->queue_slot_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
					       GFP_KERNEL);
	if (!pqm->queue_slot_bitmap)
		return -ENOMEM;
	pqm->process = p;

	return 0;
}

void pqm_uninit(struct process_queue_manager *pqm)
{
	struct process_queue_node *pqn, *next;

	list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
		if (pqn->q && pqn->q->gws)
			amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
				pqn->q->gws);
		kfd_procfs_del_queue(pqn->q);
		uninit_queue(pqn->q);
		list_del(&pqn->process_queue_list);
		kfree(pqn);
	}

	bitmap_free(pqm->queue_slot_bitmap);
	pqm->queue_slot_bitmap = NULL;
}

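/*
 * Allocate and initialize a user-mode queue. For MES-enabled devices this
 * also allocates and zeroes the gang context buffer that the MES firmware
 * scheduler requires.
 */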
static int init_user_queue(struct process_queue_manager *pqm,
				struct kfd_node *dev, struct queue **q,
				struct queue_properties *q_properties,
				struct file *f, struct amdgpu_bo *wptr_bo,
				unsigned int qid)
{
	int retval;

	/* Doorbell initialized in user space */
	q_properties->doorbell_ptr = NULL;
	q_properties->exception_status = KFD_EC_MASK(EC_QUEUE_NEW);

	/* let DQM handle it */
	q_properties->vmid = 0;
	q_properties->queue_id = qid;

	retval = init_queue(q, q_properties);
	if (retval != 0)
		return retval;

	(*q)->device = dev;
	(*q)->process = pqm->process;

	if (dev->kfd->shared_resources.enable_mes) {
		retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
						AMDGPU_MES_GANG_CTX_SIZE,
						&(*q)->gang_ctx_bo,
						&(*q)->gang_ctx_gpu_addr,
						&(*q)->gang_ctx_cpu_ptr,
						false);
		if (retval) {
			pr_err("failed to allocate gang context bo\n");
			goto cleanup;
		}
		memset((*q)->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);
		(*q)->wptr_bo = wptr_bo;
	}

	pr_debug("PQM After init queue\n");
	return 0;

cleanup:
	uninit_queue(*q);
	*q = NULL;
	return retval;
}

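/*
 * Create a queue for a process on the given device: a user compute or SDMA
 * queue, or a kernel DIQ. On success *qid holds the new queue id and, for
 * user queues, *p_doorbell_offset_in_process holds the doorbell byte offset
 * within the process doorbell pages.
 */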
int pqm_create_queue(struct process_queue_manager *pqm,
			    struct kfd_node *dev,
			    struct file *f,
			    struct queue_properties *properties,
			    unsigned int *qid,
			    struct amdgpu_bo *wptr_bo,
			    const struct kfd_criu_queue_priv_data *q_data,
			    const void *restore_mqd,
			    const void *restore_ctl_stack,
			    uint32_t *p_doorbell_offset_in_process)
{
	int retval;
	struct kfd_process_device *pdd;
	struct queue *q;
	struct process_queue_node *pqn;
	struct kernel_queue *kq;
	enum kfd_queue_type type = properties->type;
	unsigned int max_queues = 127; /* HWS limit */

	/*
	 * On GFX 9.4.3, increase the number of queues that
	 * can be created to 255. No HWS limit on GFX 9.4.3.
	 */
	if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3))
		max_queues = 255;

	q = NULL;
	kq = NULL;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -EINVAL;
	}

	/*
	 * For debug processes, verify that we stay within the static queue
	 * limit, currently set to half of the total available HQD slots.
	 * If we are just about to create a DIQ, the is_debug flag is not set
	 * yet, so check the queue type as well.
	 */
	if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ))
		max_queues = dev->kfd->device_info.max_no_of_hqd/2;

	if (pdd->qpd.queue_count >= max_queues)
		return -ENOSPC;

	if (q_data) {
		retval = assign_queue_slot_by_qid(pqm, q_data->q_id);
		*qid = q_data->q_id;
	} else
		retval = find_available_queue_slot(pqm, qid);

	if (retval != 0)
		return retval;

	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);

	pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);
	if (!pqn) {
		retval = -ENOMEM;
		goto err_allocate_pqn;
	}

	switch (type) {
	case KFD_QUEUE_TYPE_SDMA:
	case KFD_QUEUE_TYPE_SDMA_XGMI:
		/* SDMA queues are always allocated statically no matter
		 * which scheduler mode is used. We also do not need to
		 * check whether an SDMA queue can be allocated here, because
		 * allocate_sdma_queue() in create_queue() has the
		 * corresponding check logic.
		 */
		retval = init_user_queue(pqm, dev, &q, properties, f, wptr_bo, *qid);
		if (retval != 0)
			goto err_create_queue;
		pqn->q = q;
		pqn->kq = NULL;
		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
						    restore_mqd, restore_ctl_stack);
		print_queue(q);
		break;

	case KFD_QUEUE_TYPE_COMPUTE:
		/* check if there is over subscription */
		if ((dev->dqm->sched_policy ==
		     KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
		((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
		(dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) {
			pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
			retval = -EPERM;
			goto err_create_queue;
		}

		retval = init_user_queue(pqm, dev, &q, properties, f, wptr_bo, *qid);
		if (retval != 0)
			goto err_create_queue;
		pqn->q = q;
		pqn->kq = NULL;
		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
						    restore_mqd, restore_ctl_stack);
		print_queue(q);
		break;
	case KFD_QUEUE_TYPE_DIQ:
		kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
		if (!kq) {
			retval = -ENOMEM;
			goto err_create_queue;
		}
		kq->queue->properties.queue_id = *qid;
		pqn->kq = kq;
		pqn->q = NULL;
		retval = kfd_process_drain_interrupts(pdd);
		if (retval)
			break;

		retval = dev->dqm->ops.create_kernel_queue(dev->dqm,
							kq, &pdd->qpd);
		break;
	default:
		WARN(1, "Invalid queue type %d", type);
		retval = -EINVAL;
	}

	if (retval != 0) {
		pr_err("Pasid 0x%x DQM create queue type %d failed. ret %d\n",
			pqm->process->pasid, type, retval);
		goto err_create_queue;
	}

	if (q && p_doorbell_offset_in_process)
		/* Return the doorbell offset within the doorbell page
		 * to the caller so it can be passed up to user mode
		 * (in bytes).
		 * There are always 1024 doorbells per process, so in case
		 * of 8-byte doorbells, there are two doorbell pages per
		 * process.
		 */
		*p_doorbell_offset_in_process =
			(q->properties.doorbell_off * sizeof(uint32_t)) &
			(kfd_doorbell_process_slice(dev->kfd) - 1);

	pr_debug("PQM After DQM create queue\n");

	list_add(&pqn->process_queue_list, &pqm->queues);

	if (q) {
		pr_debug("PQM done creating queue\n");
		kfd_procfs_add_queue(q);
		print_queue_properties(&q->properties);
	}

	return retval;

err_create_queue:
	uninit_queue(q);
	if (kq)
		kernel_queue_uninit(kq, false);
	kfree(pqn);
err_allocate_pqn:
	/* if queues list is empty, unregister process from device */
	clear_bit(*qid, pqm->queue_slot_bitmap);
	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);
	return retval;
}

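/*
 * Destroy the queue identified by qid and release its slot. On -ETIME from
 * the DQM (e.g. an unresponsive HWS) the local state is still torn down so
 * the process can continue.
 */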
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
{
	struct process_queue_node *pqn;
	struct kfd_process_device *pdd;
	struct device_queue_manager *dqm;
	struct kfd_node *dev;
	int retval;

	dqm = NULL;

	retval = 0;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_err("Queue id does not match any known queue\n");
		return -EINVAL;
	}

	dev = NULL;
	if (pqn->kq)
		dev = pqn->kq->dev;
	if (pqn->q)
		dev = pqn->q->device;
	if (WARN_ON(!dev))
		return -ENODEV;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -EINVAL;
	}

	if (pqn->kq) {
		/* destroy kernel queue (DIQ) */
		dqm = pqn->kq->dev->dqm;
		dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
		kernel_queue_uninit(pqn->kq, false);
	}

	if (pqn->q) {
		kfd_procfs_del_queue(pqn->q);
		dqm = pqn->q->device->dqm;
		retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
		if (retval) {
			pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
				pqm->process->pasid,
				pqn->q->properties.queue_id, retval);
			if (retval != -ETIME)
				goto err_destroy_queue;
		}

		if (pqn->q->gws) {
			amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
				pqn->q->gws);
			pdd->qpd.num_gws = 0;
		}

		if (dev->kfd->shared_resources.enable_mes) {
			amdgpu_amdkfd_free_gtt_mem(dev->adev,
						   pqn->q->gang_ctx_bo);
			if (pqn->q->wptr_bo)
				amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->wptr_bo);
		}
		uninit_queue(pqn->q);
	}

	list_del(&pqn->process_queue_list);
	kfree(pqn);
	clear_bit(qid, pqm->queue_slot_bitmap);

	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dqm->ops.unregister_process(dqm, &pdd->qpd);

err_destroy_queue:
	return retval;
}

int pqm_update_queue_properties(struct process_queue_manager *pqm,
				unsigned int qid, struct queue_properties *p)
{
	int retval;
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("No queue %d exists for update operation\n", qid);
		return -EFAULT;
	}

	pqn->q->properties.queue_address = p->queue_address;
	pqn->q->properties.queue_size = p->queue_size;
	pqn->q->properties.queue_percent = p->queue_percent;
	pqn->q->properties.priority = p->priority;
	pqn->q->properties.pm4_target_xcc = p->pm4_target_xcc;

	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q, NULL);
	if (retval != 0)
		return retval;

	return 0;
}

int pqm_update_mqd(struct process_queue_manager *pqm,
				unsigned int qid, struct mqd_update_info *minfo)
{
	int retval;
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("No queue %d exists for update operation\n", qid);
		return -EFAULT;
	}

	/* CUs are masked for debugger requirements, so deny user CU masking */
	if (pqn->q->properties.is_dbg_wa && minfo && minfo->cu_mask.ptr)
		return -EBUSY;

	/* ASICs that have WGPs must enforce pairwise enabled mask checks. */
	if (minfo && minfo->cu_mask.ptr &&
			KFD_GC_VERSION(pqn->q->device) >= IP_VERSION(10, 0, 0)) {
		int i;

		for (i = 0; i < minfo->cu_mask.count; i += 2) {
			uint32_t cu_pair = (minfo->cu_mask.ptr[i / 32] >> (i % 32)) & 0x3;

			if (cu_pair && cu_pair != 0x3) {
				pr_debug("CUs must be adjacent pairwise enabled.\n");
				return -EINVAL;
			}
		}
	}

	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q, minfo);
	if (retval != 0)
		return retval;

	if (minfo && minfo->cu_mask.ptr)
		pqn->q->properties.is_user_cu_masked = true;

	return 0;
}

struct kernel_queue *pqm_get_kernel_queue(
					struct process_queue_manager *pqm,
					unsigned int qid)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (pqn && pqn->kq)
		return pqn->kq;

	return NULL;
}

struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
					unsigned int qid)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	return pqn ? pqn->q : NULL;
}

int pqm_get_wave_state(struct process_queue_manager *pqm,
		       unsigned int qid,
		       void __user *ctl_stack,
		       u32 *ctl_stack_used_size,
		       u32 *save_area_used_size)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n",
			 qid);
		return -EFAULT;
	}

	return pqn->q->device->dqm->ops.get_wave_state(pqn->q->device->dqm,
						       pqn->q,
						       ctl_stack,
						       ctl_stack_used_size,
						       save_area_used_size);
}

int pqm_get_queue_snapshot(struct process_queue_manager *pqm,
			   uint64_t exception_clear_mask,
			   void __user *buf,
			   int *num_qss_entries,
			   uint32_t *entry_size)
{
	struct process_queue_node *pqn;
	struct kfd_queue_snapshot_entry src;
	uint32_t tmp_entry_size = *entry_size, tmp_qss_entries = *num_qss_entries;
	int r = 0;

	*num_qss_entries = 0;
	if (!(*entry_size))
		return -EINVAL;

	*entry_size = min_t(size_t, *entry_size, sizeof(struct kfd_queue_snapshot_entry));
	mutex_lock(&pqm->process->event_mutex);

	memset(&src, 0, sizeof(src));

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (!pqn->q)
			continue;

		if (*num_qss_entries < tmp_qss_entries) {
			set_queue_snapshot_entry(pqn->q, exception_clear_mask, &src);

			if (copy_to_user(buf, &src, *entry_size)) {
				r = -EFAULT;
				break;
			}
			buf += tmp_entry_size;
		}
		*num_qss_entries += 1;
	}

	mutex_unlock(&pqm->process->event_mutex);
	return r;
}

static int get_queue_data_sizes(struct kfd_process_device *pdd,
				struct queue *q,
				uint32_t *mqd_size,
				uint32_t *ctl_stack_size)
{
	int ret;

	ret = pqm_get_queue_checkpoint_info(&pdd->process->pqm,
					    q->properties.queue_id,
					    mqd_size,
					    ctl_stack_size);
	if (ret)
		pr_err("Failed to get queue dump info (%d)\n", ret);

	return ret;
}

int kfd_process_get_queue_info(struct kfd_process *p,
			       uint32_t *num_queues,
			       uint64_t *priv_data_sizes)
{
	uint32_t extra_data_sizes = 0;
	struct queue *q;
	int i;
	int ret;

	*num_queues = 0;

	/* Run over all PDDs of the process */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		list_for_each_entry(q, &pdd->qpd.queues_list, list) {
			if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
				q->properties.type == KFD_QUEUE_TYPE_SDMA ||
				q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
				uint32_t mqd_size, ctl_stack_size;

				*num_queues = *num_queues + 1;

				ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
				if (ret)
					return ret;

				extra_data_sizes += mqd_size + ctl_stack_size;
			} else {
				pr_err("Unsupported queue type (%d)\n", q->properties.type);
				return -EOPNOTSUPP;
			}
		}
	}
	*priv_data_sizes = extra_data_sizes +
				(*num_queues * sizeof(struct kfd_criu_queue_priv_data));

	return 0;
}

static int pqm_checkpoint_mqd(struct process_queue_manager *pqm,
			      unsigned int qid,
			      void *mqd,
			      void *ctl_stack)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n", qid);
		return -EFAULT;
	}

	if (!pqn->q->device->dqm->ops.checkpoint_mqd) {
		pr_err("amdkfd: queue dumping not supported on this device\n");
		return -EOPNOTSUPP;
	}

	return pqn->q->device->dqm->ops.checkpoint_mqd(pqn->q->device->dqm,
						       pqn->q, mqd, ctl_stack);
}

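/*
 * Fill one queue's CRIU private-data entry. The caller provides a buffer
 * laid out as: struct kfd_criu_queue_priv_data, then the MQD, then the
 * control stack.
 */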
static int criu_checkpoint_queue(struct kfd_process_device *pdd,
			   struct queue *q,
			   struct kfd_criu_queue_priv_data *q_data)
{
	uint8_t *mqd, *ctl_stack;
	int ret;

	mqd = (void *)(q_data + 1);
	ctl_stack = mqd + q_data->mqd_size;

	q_data->gpu_id = pdd->user_gpu_id;
	q_data->type = q->properties.type;
	q_data->format = q->properties.format;
	q_data->q_id = q->properties.queue_id;
	q_data->q_address = q->properties.queue_address;
	q_data->q_size = q->properties.queue_size;
	q_data->priority = q->properties.priority;
	q_data->q_percent = q->properties.queue_percent;
	q_data->read_ptr_addr = (uint64_t)q->properties.read_ptr;
	q_data->write_ptr_addr = (uint64_t)q->properties.write_ptr;
	q_data->doorbell_id = q->doorbell_id;

	q_data->sdma_id = q->sdma_id;

	q_data->eop_ring_buffer_address =
		q->properties.eop_ring_buffer_address;

	q_data->eop_ring_buffer_size = q->properties.eop_ring_buffer_size;

	q_data->ctx_save_restore_area_address =
		q->properties.ctx_save_restore_area_address;

	q_data->ctx_save_restore_area_size =
		q->properties.ctx_save_restore_area_size;

	q_data->gws = !!q->gws;

	ret = pqm_checkpoint_mqd(&pdd->process->pqm, q->properties.queue_id, mqd, ctl_stack);
	if (ret) {
		pr_err("Failed checkpoint queue_mqd (%d)\n", ret);
		return ret;
	}

	pr_debug("Dumping Queue: gpu_id:%x queue_id:%u\n", q_data->gpu_id, q_data->q_id);
	return ret;
}

static int criu_checkpoint_queues_device(struct kfd_process_device *pdd,
				   uint8_t __user *user_priv,
				   unsigned int *q_index,
				   uint64_t *queues_priv_data_offset)
{
	unsigned int q_private_data_size = 0;
	uint8_t *q_private_data = NULL; /* Local buffer to store individual queue private data */
	struct queue *q;
	int ret = 0;

	list_for_each_entry(q, &pdd->qpd.queues_list, list) {
		struct kfd_criu_queue_priv_data *q_data;
		uint64_t q_data_size;
		uint32_t mqd_size;
		uint32_t ctl_stack_size;

		if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE &&
			q->properties.type != KFD_QUEUE_TYPE_SDMA &&
			q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI) {

			pr_err("Unsupported queue type (%d)\n", q->properties.type);
			ret = -EOPNOTSUPP;
			break;
		}

		ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
		if (ret)
			break;

		q_data_size = sizeof(*q_data) + mqd_size + ctl_stack_size;

		/* Increase local buffer space if needed */
		if (q_private_data_size < q_data_size) {
			kfree(q_private_data);

			q_private_data = kzalloc(q_data_size, GFP_KERNEL);
			if (!q_private_data) {
				ret = -ENOMEM;
				break;
			}
			q_private_data_size = q_data_size;
		}

		q_data = (struct kfd_criu_queue_priv_data *)q_private_data;

		/* data stored in this order: priv_data, mqd, ctl_stack */
		q_data->mqd_size = mqd_size;
		q_data->ctl_stack_size = ctl_stack_size;

		ret = criu_checkpoint_queue(pdd, q, q_data);
		if (ret)
			break;

		q_data->object_type = KFD_CRIU_OBJECT_TYPE_QUEUE;

		ret = copy_to_user(user_priv + *queues_priv_data_offset,
				q_data, q_data_size);
		if (ret) {
			ret = -EFAULT;
			break;
		}
		*queues_priv_data_offset += q_data_size;
		*q_index = *q_index + 1;
	}

	kfree(q_private_data);

	return ret;
}

int kfd_criu_checkpoint_queues(struct kfd_process *p,
			 uint8_t __user *user_priv_data,
			 uint64_t *priv_data_offset)
{
	int ret = 0, pdd_index, q_index = 0;

	for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
		struct kfd_process_device *pdd = p->pdds[pdd_index];

		/*
		 * criu_checkpoint_queues_device will copy data to user and update q_index and
		 * queues_priv_data_offset
		 */
		ret = criu_checkpoint_queues_device(pdd, user_priv_data, &q_index,
					      priv_data_offset);

		if (ret)
			break;
	}

	return ret;
}

static void set_queue_properties_from_criu(struct queue_properties *qp,
					  struct kfd_criu_queue_priv_data *q_data)
{
	qp->is_interop = false;
	qp->queue_percent = q_data->q_percent;
	qp->priority = q_data->priority;
	qp->queue_address = q_data->q_address;
	qp->queue_size = q_data->q_size;
	qp->read_ptr = (uint32_t *) q_data->read_ptr_addr;
	qp->write_ptr = (uint32_t *) q_data->write_ptr_addr;
	qp->eop_ring_buffer_address = q_data->eop_ring_buffer_address;
	qp->eop_ring_buffer_size = q_data->eop_ring_buffer_size;
	qp->ctx_save_restore_area_address = q_data->ctx_save_restore_area_address;
	qp->ctx_save_restore_area_size = q_data->ctx_save_restore_area_size;
	qp->ctl_stack_size = q_data->ctl_stack_size;
	qp->type = q_data->type;
	qp->format = q_data->format;
}

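/*
 * Restore a single queue from CRIU private data: read the queue entry plus
 * its MQD and control stack from user memory, then recreate the queue via
 * pqm_create_queue().
 */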
int kfd_criu_restore_queue(struct kfd_process *p,
			   uint8_t __user *user_priv_ptr,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size)
{
	uint8_t *mqd, *ctl_stack, *q_extra_data = NULL;
	struct kfd_criu_queue_priv_data *q_data;
	struct kfd_process_device *pdd;
	uint64_t q_extra_data_size;
	struct queue_properties qp;
	unsigned int queue_id;
	int ret = 0;

	if (*priv_data_offset + sizeof(*q_data) > max_priv_data_size)
		return -EINVAL;

	q_data = kmalloc(sizeof(*q_data), GFP_KERNEL);
	if (!q_data)
		return -ENOMEM;

	ret = copy_from_user(q_data, user_priv_ptr + *priv_data_offset, sizeof(*q_data));
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}

	*priv_data_offset += sizeof(*q_data);
	q_extra_data_size = (uint64_t)q_data->ctl_stack_size + q_data->mqd_size;

	if (*priv_data_offset + q_extra_data_size > max_priv_data_size) {
		ret = -EINVAL;
		goto exit;
	}

	q_extra_data = kmalloc(q_extra_data_size, GFP_KERNEL);
	if (!q_extra_data) {
		ret = -ENOMEM;
		goto exit;
	}

	ret = copy_from_user(q_extra_data, user_priv_ptr + *priv_data_offset, q_extra_data_size);
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}

	*priv_data_offset += q_extra_data_size;

	pdd = kfd_process_device_data_by_id(p, q_data->gpu_id);
	if (!pdd) {
		pr_err("Failed to get pdd\n");
		ret = -EINVAL;
		goto exit;
	}

	if (!pdd->doorbell_index &&
	    kfd_alloc_process_doorbells(pdd->dev->kfd, &pdd->doorbell_index) < 0) {
		ret = -ENOMEM;
		goto exit;
	}

	/* data stored in this order: mqd, ctl_stack */
	mqd = q_extra_data;
	ctl_stack = mqd + q_data->mqd_size;

	memset(&qp, 0, sizeof(qp));
	set_queue_properties_from_criu(&qp, q_data);

	print_queue_properties(&qp);

	ret = pqm_create_queue(&p->pqm, pdd->dev, NULL, &qp, &queue_id, NULL, q_data, mqd, ctl_stack,
				NULL);
	if (ret) {
		pr_err("Failed to create new queue err:%d\n", ret);
		goto exit;
	}

	if (q_data->gws)
		ret = pqm_set_gws(&p->pqm, q_data->q_id, pdd->dev->gws);

exit:
	if (ret)
		pr_err("Failed to restore queue (%d)\n", ret);
	else
		pr_debug("Queue id %d was restored successfully\n", queue_id);

	kfree(q_data);
	kfree(q_extra_data);

	return ret;
}

int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
				  unsigned int qid,
				  uint32_t *mqd_size,
				  uint32_t *ctl_stack_size)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n", qid);
		return -EFAULT;
	}

	if (!pqn->q->device->dqm->ops.get_queue_checkpoint_info) {
		pr_err("amdkfd: queue dumping not supported on this device\n");
		return -EOPNOTSUPP;
	}

	pqn->q->device->dqm->ops.get_queue_checkpoint_info(pqn->q->device->dqm,
						       pqn->q, mqd_size,
						       ctl_stack_size);
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

int pqm_debugfs_mqds(struct seq_file *m, void *data)
{
	struct process_queue_manager *pqm = data;
	struct process_queue_node *pqn;
	struct queue *q;
	enum KFD_MQD_TYPE mqd_type;
	struct mqd_manager *mqd_mgr;
	int r = 0, xcc, num_xccs = 1;
	void *mqd;
	uint64_t size = 0;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (pqn->q) {
			q = pqn->q;
			switch (q->properties.type) {
			case KFD_QUEUE_TYPE_SDMA:
			case KFD_QUEUE_TYPE_SDMA_XGMI:
				seq_printf(m, "  SDMA queue on device %x\n",
					   q->device->id);
				mqd_type = KFD_MQD_TYPE_SDMA;
				break;
			case KFD_QUEUE_TYPE_COMPUTE:
				seq_printf(m, "  Compute queue on device %x\n",
					   q->device->id);
				mqd_type = KFD_MQD_TYPE_CP;
				num_xccs = NUM_XCC(q->device->xcc_mask);
				break;
			default:
				seq_printf(m,
				"  Bad user queue type %d on device %x\n",
					   q->properties.type, q->device->id);
				continue;
			}
			mqd_mgr = q->device->dqm->mqd_mgrs[mqd_type];
			size = mqd_mgr->mqd_stride(mqd_mgr,
							&q->properties);
		} else if (pqn->kq) {
			q = pqn->kq->queue;
			mqd_mgr = pqn->kq->mqd_mgr;
			switch (q->properties.type) {
			case KFD_QUEUE_TYPE_DIQ:
				seq_printf(m, "  DIQ on device %x\n",
					   pqn->kq->dev->id);
				break;
			default:
				seq_printf(m,
				"  Bad kernel queue type %d on device %x\n",
					   q->properties.type,
					   pqn->kq->dev->id);
				continue;
			}
		} else {
			seq_printf(m,
		"  Weird: Queue node with neither kernel nor user queue\n");
			continue;
		}

		for (xcc = 0; xcc < num_xccs; xcc++) {
			mqd = q->mqd + size * xcc;
			r = mqd_mgr->debugfs_show_mqd(m, mqd);
			if (r != 0)
				break;
		}
	}

	return r;
}

#endif