// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/list.h>
#include "kfd_device_queue_manager.h"
#include "kfd_priv.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"

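/*
 * Look up the process queue node that owns the given queue ID. User
 * queues (pqn->q) and kernel queues (pqn->kq) share the same qid space,
 * so either pointer may match.
 */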
static inline struct process_queue_node *get_queue_by_qid(
			struct process_queue_manager *pqm, unsigned int qid)
{
	struct process_queue_node *pqn;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if ((pqn->q && pqn->q->properties.queue_id == qid) ||
		    (pqn->kq && pqn->kq->queue->properties.queue_id == qid))
			return pqn;
	}

	return NULL;
}

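/*
 * Reserve a specific queue slot. This is used on the CRIU restore path,
 * where a queue must come back with the qid it had at checkpoint time.
 */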
static int assign_queue_slot_by_qid(struct process_queue_manager *pqm,
				    unsigned int qid)
{
	if (qid >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
		return -EINVAL;

	if (__test_and_set_bit(qid, pqm->queue_slot_bitmap)) {
		pr_err("Cannot create new queue because requested qid(%u) is in use\n", qid);
		return -ENOSPC;
	}

	return 0;
}

static int find_available_queue_slot(struct process_queue_manager *pqm,
					unsigned int *qid)
{
	unsigned long found;

	found = find_first_zero_bit(pqm->queue_slot_bitmap,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	pr_debug("The new slot id %lu\n", found);

	if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
		pr_info("Cannot open more queues for process with pasid 0x%x\n",
				pqm->process->pasid);
		return -ENOMEM;
	}

	set_bit(found, pqm->queue_slot_bitmap);
	*qid = found;

	return 0;
}

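/*
 * Remove all of a process's queues from one device's queue manager.
 * process_termination() must run at most once per device, so the
 * already_dequeued flag guards against repeated calls.
 */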
void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
{
	struct kfd_node *dev = pdd->dev;

	if (pdd->already_dequeued)
		return;

	dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
	pdd->already_dequeued = true;
}

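/*
 * Attach or detach a GWS (global wave sync) allocation for the queue
 * identified by qid: a non-NULL gws attaches, NULL detaches. Only one
 * queue per process may hold GWS at a time.
 */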
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
			void *gws)
{
	struct kfd_node *dev = NULL;
	struct process_queue_node *pqn;
	struct kfd_process_device *pdd;
	struct kgd_mem *mem = NULL;
	int ret;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_err("Queue id does not match any known queue\n");
		return -EINVAL;
	}

	if (pqn->q)
		dev = pqn->q->device;
	if (WARN_ON(!dev))
		return -ENODEV;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -EINVAL;
	}

	/* Only allow one queue per process to have GWS assigned */
	if (gws && pdd->qpd.num_gws)
		return -EBUSY;

	if (!gws && pdd->qpd.num_gws == 0)
		return -EINVAL;

	if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3)) {
		if (gws)
			ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info,
				gws, &mem);
		else
			ret = amdgpu_amdkfd_remove_gws_from_process(pdd->process->kgd_process_info,
				pqn->q->gws);
		if (unlikely(ret))
			return ret;
		pqn->q->gws = mem;
	} else {
		/*
		 * Intentionally set GWS to a non-NULL value
		 * for GFX 9.4.3.
		 */
		pqn->q->gws = gws ? ERR_PTR(-ENOMEM) : NULL;
	}

	pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0;

	return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q, NULL);
}

void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		kfd_process_dequeue_from_device(p->pdds[i]);
}

int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
{
	INIT_LIST_HEAD(&pqm->queues);
	pqm->queue_slot_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
					       GFP_KERNEL);
	if (!pqm->queue_slot_bitmap)
		return -ENOMEM;
	pqm->process = p;

	return 0;
}

void pqm_uninit(struct process_queue_manager *pqm)
{
	struct process_queue_node *pqn, *next;

	list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
		if (pqn->q && pqn->q->gws &&
		    KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3))
			amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
				pqn->q->gws);
		kfd_procfs_del_queue(pqn->q);
		uninit_queue(pqn->q);
		list_del(&pqn->process_queue_list);
		kfree(pqn);
	}

	bitmap_free(pqm->queue_slot_bitmap);
	pqm->queue_slot_bitmap = NULL;
}

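/*
 * Allocate and initialize a user-mode queue object. On MES-enabled
 * devices this also allocates and zeroes the gang context buffer that
 * the MES firmware scheduler requires for each queue.
 */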
static int init_user_queue(struct process_queue_manager *pqm,
				struct kfd_node *dev, struct queue **q,
				struct queue_properties *q_properties,
				struct file *f, struct amdgpu_bo *wptr_bo,
				unsigned int qid)
{
	int retval;

	/* Doorbell initialized in user space */
	q_properties->doorbell_ptr = NULL;
	q_properties->exception_status = KFD_EC_MASK(EC_QUEUE_NEW);

	/* Let DQM handle it */
	q_properties->vmid = 0;
	q_properties->queue_id = qid;

	retval = init_queue(q, q_properties);
	if (retval != 0)
		return retval;

	(*q)->device = dev;
	(*q)->process = pqm->process;

	if (dev->kfd->shared_resources.enable_mes) {
		retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
						AMDGPU_MES_GANG_CTX_SIZE,
						&(*q)->gang_ctx_bo,
						&(*q)->gang_ctx_gpu_addr,
						&(*q)->gang_ctx_cpu_ptr,
						false);
		if (retval) {
			pr_err("failed to allocate gang context bo\n");
			goto cleanup;
		}
		memset((*q)->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);
		(*q)->wptr_bo = wptr_bo;
	}

	pr_debug("PQM After init queue\n");
	return 0;

cleanup:
	uninit_queue(*q);
	*q = NULL;
	return retval;
}

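/*
 * Create a queue for the process on the given device. For user queues
 * this covers compute and SDMA types; KFD_QUEUE_TYPE_DIQ creates a
 * kernel (debug interface) queue instead. When q_data is non-NULL the
 * queue is being restored from a CRIU checkpoint and keeps its saved
 * qid, MQD and control stack.
 */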
int pqm_create_queue(struct process_queue_manager *pqm,
			    struct kfd_node *dev,
			    struct file *f,
			    struct queue_properties *properties,
			    unsigned int *qid,
			    struct amdgpu_bo *wptr_bo,
			    const struct kfd_criu_queue_priv_data *q_data,
			    const void *restore_mqd,
			    const void *restore_ctl_stack,
			    uint32_t *p_doorbell_offset_in_process)
{
	int retval;
	struct kfd_process_device *pdd;
	struct queue *q;
	struct process_queue_node *pqn;
	struct kernel_queue *kq;
	enum kfd_queue_type type = properties->type;
	unsigned int max_queues = 127; /* HWS limit */

	/*
	 * GFX 9.4.3 has no HWS queue limit, so allow up to 255 queues
	 * to be created there.
	 */
	if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3))
		max_queues = 255;

	q = NULL;
	kq = NULL;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -EINVAL;
	}

	/*
	 * For a debug process, verify that it is within the static queues
	 * limit, currently set to half of the total available HQD slots.
	 * If we are just about to create a DIQ, the is_debug flag is not
	 * set yet, so check the queue type as well.
	 */
	if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ))
		max_queues = dev->kfd->device_info.max_no_of_hqd / 2;

	if (pdd->qpd.queue_count >= max_queues)
		return -ENOSPC;

	if (q_data) {
		retval = assign_queue_slot_by_qid(pqm, q_data->q_id);
		*qid = q_data->q_id;
	} else {
		retval = find_available_queue_slot(pqm, qid);
	}

	if (retval != 0)
		return retval;

	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);

	pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);
	if (!pqn) {
		retval = -ENOMEM;
		goto err_allocate_pqn;
	}

	switch (type) {
	case KFD_QUEUE_TYPE_SDMA:
	case KFD_QUEUE_TYPE_SDMA_XGMI:
		/* SDMA queues are always allocated statically no matter
		 * which scheduler mode is used. We also do not need to
		 * check whether an SDMA queue can be allocated here,
		 * because allocate_sdma_queue() in create_queue() has the
		 * corresponding check logic.
		 */
		retval = init_user_queue(pqm, dev, &q, properties, f, wptr_bo, *qid);
		if (retval != 0)
			goto err_create_queue;
		pqn->q = q;
		pqn->kq = NULL;
		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
						    restore_mqd, restore_ctl_stack);
		print_queue(q);
		break;

	case KFD_QUEUE_TYPE_COMPUTE:
		/* Check if there is oversubscription */
		if ((dev->dqm->sched_policy ==
		     KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
		((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
		(dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) {
			pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
			retval = -EPERM;
			goto err_create_queue;
		}

		retval = init_user_queue(pqm, dev, &q, properties, f, wptr_bo, *qid);
		if (retval != 0)
			goto err_create_queue;
		pqn->q = q;
		pqn->kq = NULL;
		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
						    restore_mqd, restore_ctl_stack);
		print_queue(q);
		break;
	case KFD_QUEUE_TYPE_DIQ:
		kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
		if (!kq) {
			retval = -ENOMEM;
			goto err_create_queue;
		}
		kq->queue->properties.queue_id = *qid;
		pqn->kq = kq;
		pqn->q = NULL;
		retval = kfd_process_drain_interrupts(pdd);
		if (retval)
			break;

		retval = dev->dqm->ops.create_kernel_queue(dev->dqm,
							kq, &pdd->qpd);
		break;
	default:
		WARN(1, "Invalid queue type %d", type);
		retval = -EINVAL;
	}

	if (retval != 0) {
		pr_err("Pasid 0x%x DQM create queue type %d failed. ret %d\n",
			pqm->process->pasid, type, retval);
		goto err_create_queue;
	}

	if (q && p_doorbell_offset_in_process)
		/* Return the doorbell offset (in bytes) within the
		 * doorbell page to the caller so it can be passed up
		 * to user mode.
		 * There are always 1024 doorbells per process, so in
		 * the case of 8-byte doorbells there are two doorbell
		 * pages per process.
		 */
		*p_doorbell_offset_in_process =
			(q->properties.doorbell_off * sizeof(uint32_t)) &
			(kfd_doorbell_process_slice(dev->kfd) - 1);

	pr_debug("PQM After DQM create queue\n");

	list_add(&pqn->process_queue_list, &pqm->queues);

	if (q) {
		pr_debug("PQM done creating queue\n");
		kfd_procfs_add_queue(q);
		print_queue_properties(&q->properties);
	}

	return retval;

err_create_queue:
	uninit_queue(q);
	if (kq)
		kernel_queue_uninit(kq, false);
	kfree(pqn);
err_allocate_pqn:
	/* If the queues list is now empty, unregister the process from the device */
	clear_bit(*qid, pqm->queue_slot_bitmap);
	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);
	return retval;
}

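/*
 * Destroy the queue identified by qid and release everything attached
 * to it: the procfs entry, any GWS reference, the MES gang context, and
 * finally the qid slot itself. A -ETIME from the DQM (queue unmap timed
 * out) is treated as non-fatal so that teardown can still proceed.
 */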
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
{
	struct process_queue_node *pqn;
	struct kfd_process_device *pdd;
	struct device_queue_manager *dqm;
	struct kfd_node *dev;
	int retval;

	dqm = NULL;

	retval = 0;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_err("Queue id does not match any known queue\n");
		return -EINVAL;
	}

	dev = NULL;
	if (pqn->kq)
		dev = pqn->kq->dev;
	if (pqn->q)
		dev = pqn->q->device;
	if (WARN_ON(!dev))
		return -ENODEV;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -EINVAL;
	}

	if (pqn->kq) {
		/* destroy kernel queue (DIQ) */
		dqm = pqn->kq->dev->dqm;
		dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
		kernel_queue_uninit(pqn->kq, false);
	}

	if (pqn->q) {
		kfd_procfs_del_queue(pqn->q);
		dqm = pqn->q->device->dqm;
		retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
		if (retval) {
			pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
				pqm->process->pasid,
				pqn->q->properties.queue_id, retval);
			if (retval != -ETIME)
				goto err_destroy_queue;
		}

		if (pqn->q->gws) {
			if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3))
				amdgpu_amdkfd_remove_gws_from_process(
						pqm->process->kgd_process_info,
						pqn->q->gws);
			pdd->qpd.num_gws = 0;
		}

		if (dev->kfd->shared_resources.enable_mes) {
			amdgpu_amdkfd_free_gtt_mem(dev->adev,
						   pqn->q->gang_ctx_bo);
			if (pqn->q->wptr_bo)
				amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->wptr_bo);
		}
		uninit_queue(pqn->q);
	}

	list_del(&pqn->process_queue_list);
	kfree(pqn);
	clear_bit(qid, pqm->queue_slot_bitmap);

	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dqm->ops.unregister_process(dqm, &pdd->qpd);

err_destroy_queue:
	return retval;
}

int pqm_update_queue_properties(struct process_queue_manager *pqm,
				unsigned int qid, struct queue_properties *p)
{
	int retval;
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn || !pqn->q) {
		pr_debug("No queue %d exists for update operation\n", qid);
		return -EFAULT;
	}

	pqn->q->properties.queue_address = p->queue_address;
	pqn->q->properties.queue_size = p->queue_size;
	pqn->q->properties.queue_percent = p->queue_percent;
	pqn->q->properties.priority = p->priority;
	pqn->q->properties.pm4_target_xcc = p->pm4_target_xcc;

	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q, NULL);
	if (retval != 0)
		return retval;

	return 0;
}

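/*
 * Push an MQD update (e.g. a CU mask) to the queue identified by qid.
 * CU-mask updates are rejected while the debugger workaround owns the
 * mask, and on GFX10+ (WGP-based) ASICs CUs must be enabled in adjacent
 * pairs.
 */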
int pqm_update_mqd(struct process_queue_manager *pqm,
				unsigned int qid, struct mqd_update_info *minfo)
{
	int retval;
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn || !pqn->q) {
		pr_debug("No queue %d exists for update operation\n", qid);
		return -EFAULT;
	}

	/* CUs are masked for debugger requirements so deny user mask */
	if (pqn->q->properties.is_dbg_wa && minfo && minfo->cu_mask.ptr)
		return -EBUSY;

	/* ASICs that have WGPs must enforce pairwise enabled mask checks. */
	if (minfo && minfo->cu_mask.ptr &&
			KFD_GC_VERSION(pqn->q->device) >= IP_VERSION(10, 0, 0)) {
		int i;

		for (i = 0; i < minfo->cu_mask.count; i += 2) {
			uint32_t cu_pair = (minfo->cu_mask.ptr[i / 32] >> (i % 32)) & 0x3;

			if (cu_pair && cu_pair != 0x3) {
				pr_debug("CUs must be adjacent pairwise enabled.\n");
				return -EINVAL;
			}
		}
	}

	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q, minfo);
	if (retval != 0)
		return retval;

	if (minfo && minfo->cu_mask.ptr)
		pqn->q->properties.is_user_cu_masked = true;

	return 0;
}

struct kernel_queue *pqm_get_kernel_queue(
					struct process_queue_manager *pqm,
					unsigned int qid)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (pqn && pqn->kq)
		return pqn->kq;

	return NULL;
}

struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
					unsigned int qid)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	return pqn ? pqn->q : NULL;
}

int pqm_get_wave_state(struct process_queue_manager *pqm,
		       unsigned int qid,
		       void __user *ctl_stack,
		       u32 *ctl_stack_used_size,
		       u32 *save_area_used_size)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn || !pqn->q) {
		pr_debug("amdkfd: No queue %d exists for operation\n",
			 qid);
		return -EFAULT;
	}

	return pqn->q->device->dqm->ops.get_wave_state(pqn->q->device->dqm,
						       pqn->q,
						       ctl_stack,
						       ctl_stack_used_size,
						       save_area_used_size);
}

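/*
 * Copy one snapshot entry per user queue into the user buffer. On
 * return, *num_qss_entries holds the total number of queues even when
 * the buffer was too small for all of them, so the caller can retry
 * with a larger buffer.
 */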
int pqm_get_queue_snapshot(struct process_queue_manager *pqm,
			   uint64_t exception_clear_mask,
			   void __user *buf,
			   int *num_qss_entries,
			   uint32_t *entry_size)
{
	struct process_queue_node *pqn;
	struct kfd_queue_snapshot_entry src;
	uint32_t tmp_entry_size = *entry_size, tmp_qss_entries = *num_qss_entries;
	int r = 0;

	*num_qss_entries = 0;
	if (!(*entry_size))
		return -EINVAL;

	*entry_size = min_t(size_t, *entry_size, sizeof(struct kfd_queue_snapshot_entry));
	mutex_lock(&pqm->process->event_mutex);

	memset(&src, 0, sizeof(src));

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (!pqn->q)
			continue;

		if (*num_qss_entries < tmp_qss_entries) {
			set_queue_snapshot_entry(pqn->q, exception_clear_mask, &src);

			if (copy_to_user(buf, &src, *entry_size)) {
				r = -EFAULT;
				break;
			}
			buf += tmp_entry_size;
		}
		*num_qss_entries += 1;
	}

	mutex_unlock(&pqm->process->event_mutex);
	return r;
}

static int get_queue_data_sizes(struct kfd_process_device *pdd,
				struct queue *q,
				uint32_t *mqd_size,
				uint32_t *ctl_stack_size)
{
	int ret;

	ret = pqm_get_queue_checkpoint_info(&pdd->process->pqm,
					    q->properties.queue_id,
					    mqd_size,
					    ctl_stack_size);
	if (ret)
		pr_err("Failed to get queue dump info (%d)\n", ret);

	return ret;
}

int kfd_process_get_queue_info(struct kfd_process *p,
			       uint32_t *num_queues,
			       uint64_t *priv_data_sizes)
{
	uint32_t extra_data_sizes = 0;
	struct queue *q;
	int i;
	int ret;

	*num_queues = 0;

	/* Run over all PDDs of the process */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		list_for_each_entry(q, &pdd->qpd.queues_list, list) {
			if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
				q->properties.type == KFD_QUEUE_TYPE_SDMA ||
				q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
				uint32_t mqd_size, ctl_stack_size;

				*num_queues = *num_queues + 1;

				ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
				if (ret)
					return ret;

				extra_data_sizes += mqd_size + ctl_stack_size;
			} else {
				pr_err("Unsupported queue type (%d)\n", q->properties.type);
				return -EOPNOTSUPP;
			}
		}
	}
	*priv_data_sizes = extra_data_sizes +
				(*num_queues * sizeof(struct kfd_criu_queue_priv_data));

	return 0;
}

static int pqm_checkpoint_mqd(struct process_queue_manager *pqm,
			      unsigned int qid,
			      void *mqd,
			      void *ctl_stack)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n", qid);
		return -EFAULT;
	}

	if (!pqn->q->device->dqm->ops.checkpoint_mqd) {
		pr_err("amdkfd: queue dumping not supported on this device\n");
		return -EOPNOTSUPP;
	}

	return pqn->q->device->dqm->ops.checkpoint_mqd(pqn->q->device->dqm,
						       pqn->q, mqd, ctl_stack);
}

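/*
 * Fill one CRIU private-data record for a queue. The record is laid out
 * as the fixed-size priv data immediately followed by the MQD and the
 * control stack.
 */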
static int criu_checkpoint_queue(struct kfd_process_device *pdd,
			   struct queue *q,
			   struct kfd_criu_queue_priv_data *q_data)
{
	uint8_t *mqd, *ctl_stack;
	int ret;

	mqd = (void *)(q_data + 1);
	ctl_stack = mqd + q_data->mqd_size;

	q_data->gpu_id = pdd->user_gpu_id;
	q_data->type = q->properties.type;
	q_data->format = q->properties.format;
	q_data->q_id = q->properties.queue_id;
	q_data->q_address = q->properties.queue_address;
	q_data->q_size = q->properties.queue_size;
	q_data->priority = q->properties.priority;
	q_data->q_percent = q->properties.queue_percent;
	q_data->read_ptr_addr = (uint64_t)q->properties.read_ptr;
	q_data->write_ptr_addr = (uint64_t)q->properties.write_ptr;
	q_data->doorbell_id = q->doorbell_id;

	q_data->sdma_id = q->sdma_id;

	q_data->eop_ring_buffer_address =
		q->properties.eop_ring_buffer_address;

	q_data->eop_ring_buffer_size = q->properties.eop_ring_buffer_size;

	q_data->ctx_save_restore_area_address =
		q->properties.ctx_save_restore_area_address;

	q_data->ctx_save_restore_area_size =
		q->properties.ctx_save_restore_area_size;

	q_data->gws = !!q->gws;

	ret = pqm_checkpoint_mqd(&pdd->process->pqm, q->properties.queue_id, mqd, ctl_stack);
	if (ret) {
		pr_err("Failed checkpoint queue_mqd (%d)\n", ret);
		return ret;
	}

	pr_debug("Dumping Queue: gpu_id:%x queue_id:%u\n", q_data->gpu_id, q_data->q_id);
	return ret;
}

static int criu_checkpoint_queues_device(struct kfd_process_device *pdd,
				   uint8_t __user *user_priv,
				   unsigned int *q_index,
				   uint64_t *queues_priv_data_offset)
{
	unsigned int q_private_data_size = 0;
	uint8_t *q_private_data = NULL; /* Local buffer to store individual queue private data */
	struct queue *q;
	int ret = 0;

	list_for_each_entry(q, &pdd->qpd.queues_list, list) {
		struct kfd_criu_queue_priv_data *q_data;
		uint64_t q_data_size;
		uint32_t mqd_size;
		uint32_t ctl_stack_size;

		if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE &&
			q->properties.type != KFD_QUEUE_TYPE_SDMA &&
			q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI) {

			pr_err("Unsupported queue type (%d)\n", q->properties.type);
			ret = -EOPNOTSUPP;
			break;
		}

		ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
		if (ret)
			break;

		q_data_size = sizeof(*q_data) + mqd_size + ctl_stack_size;

		/* Increase local buffer space if needed */
		if (q_private_data_size < q_data_size) {
			kfree(q_private_data);

			q_private_data = kzalloc(q_data_size, GFP_KERNEL);
			if (!q_private_data) {
				ret = -ENOMEM;
				break;
			}
			q_private_data_size = q_data_size;
		}

		q_data = (struct kfd_criu_queue_priv_data *)q_private_data;

		/* data stored in this order: priv_data, mqd, ctl_stack */
		q_data->mqd_size = mqd_size;
		q_data->ctl_stack_size = ctl_stack_size;

		ret = criu_checkpoint_queue(pdd, q, q_data);
		if (ret)
			break;

		q_data->object_type = KFD_CRIU_OBJECT_TYPE_QUEUE;

		ret = copy_to_user(user_priv + *queues_priv_data_offset,
				q_data, q_data_size);
		if (ret) {
			ret = -EFAULT;
			break;
		}
		*queues_priv_data_offset += q_data_size;
		*q_index = *q_index + 1;
	}

	kfree(q_private_data);

	return ret;
}

int kfd_criu_checkpoint_queues(struct kfd_process *p,
			 uint8_t __user *user_priv_data,
			 uint64_t *priv_data_offset)
{
	int ret = 0, pdd_index, q_index = 0;

	for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
		struct kfd_process_device *pdd = p->pdds[pdd_index];

		/*
		 * criu_checkpoint_queues_device will copy data to user and update q_index and
		 * queues_priv_data_offset
		 */
		ret = criu_checkpoint_queues_device(pdd, user_priv_data, &q_index,
					      priv_data_offset);

		if (ret)
			break;
	}

	return ret;
}

static void set_queue_properties_from_criu(struct queue_properties *qp,
					  struct kfd_criu_queue_priv_data *q_data)
{
	qp->is_interop = false;
	qp->queue_percent = q_data->q_percent;
	qp->priority = q_data->priority;
	qp->queue_address = q_data->q_address;
	qp->queue_size = q_data->q_size;
	qp->read_ptr = (uint32_t *) q_data->read_ptr_addr;
	qp->write_ptr = (uint32_t *) q_data->write_ptr_addr;
	qp->eop_ring_buffer_address = q_data->eop_ring_buffer_address;
	qp->eop_ring_buffer_size = q_data->eop_ring_buffer_size;
	qp->ctx_save_restore_area_address = q_data->ctx_save_restore_area_address;
	qp->ctx_save_restore_area_size = q_data->ctx_save_restore_area_size;
	qp->ctl_stack_size = q_data->ctl_stack_size;
	qp->type = q_data->type;
	qp->format = q_data->format;
}

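/*
 * Recreate one queue from CRIU checkpoint data: read the fixed-size
 * record and the trailing MQD and control stack from user memory, then
 * create the queue with its original qid and reattach GWS if the queue
 * held it at checkpoint time.
 */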
int kfd_criu_restore_queue(struct kfd_process *p,
			   uint8_t __user *user_priv_ptr,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size)
{
	uint8_t *mqd, *ctl_stack, *q_extra_data = NULL;
	struct kfd_criu_queue_priv_data *q_data;
	struct kfd_process_device *pdd;
	uint64_t q_extra_data_size;
	struct queue_properties qp;
	unsigned int queue_id;
	int ret = 0;

	if (*priv_data_offset + sizeof(*q_data) > max_priv_data_size)
		return -EINVAL;

	q_data = kmalloc(sizeof(*q_data), GFP_KERNEL);
	if (!q_data)
		return -ENOMEM;

	ret = copy_from_user(q_data, user_priv_ptr + *priv_data_offset, sizeof(*q_data));
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}

	*priv_data_offset += sizeof(*q_data);
	q_extra_data_size = (uint64_t)q_data->ctl_stack_size + q_data->mqd_size;

	if (*priv_data_offset + q_extra_data_size > max_priv_data_size) {
		ret = -EINVAL;
		goto exit;
	}

	q_extra_data = kmalloc(q_extra_data_size, GFP_KERNEL);
	if (!q_extra_data) {
		ret = -ENOMEM;
		goto exit;
	}

	ret = copy_from_user(q_extra_data, user_priv_ptr + *priv_data_offset, q_extra_data_size);
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}

	*priv_data_offset += q_extra_data_size;

	pdd = kfd_process_device_data_by_id(p, q_data->gpu_id);
	if (!pdd) {
		pr_err("Failed to get pdd\n");
		ret = -EINVAL;
		goto exit;
	}

	if (!pdd->doorbell_index &&
	    kfd_alloc_process_doorbells(pdd->dev->kfd, &pdd->doorbell_index) < 0) {
		ret = -ENOMEM;
		goto exit;
	}

	/* data stored in this order: mqd, ctl_stack */
	mqd = q_extra_data;
	ctl_stack = mqd + q_data->mqd_size;

	memset(&qp, 0, sizeof(qp));
	set_queue_properties_from_criu(&qp, q_data);

	print_queue_properties(&qp);

	ret = pqm_create_queue(&p->pqm, pdd->dev, NULL, &qp, &queue_id, NULL,
			       q_data, mqd, ctl_stack, NULL);
	if (ret) {
		pr_err("Failed to create new queue err:%d\n", ret);
		goto exit;
	}

	if (q_data->gws)
		ret = pqm_set_gws(&p->pqm, q_data->q_id, pdd->dev->gws);

exit:
	if (ret)
		pr_err("Failed to restore queue (%d)\n", ret);
	else
		pr_debug("Queue id %u was restored successfully\n", queue_id);

	kfree(q_extra_data);
	kfree(q_data);

	return ret;
}

int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
				  unsigned int qid,
				  uint32_t *mqd_size,
				  uint32_t *ctl_stack_size)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n", qid);
		return -EFAULT;
	}

	if (!pqn->q->device->dqm->ops.get_queue_checkpoint_info) {
		pr_err("amdkfd: queue dumping not supported on this device\n");
		return -EOPNOTSUPP;
	}

	pqn->q->device->dqm->ops.get_queue_checkpoint_info(pqn->q->device->dqm,
						       pqn->q, mqd_size,
						       ctl_stack_size);
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

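/*
 * Dump the MQDs of every queue owned by this process, one MQD per XCC
 * for compute queues on multi-XCC devices.
 */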
int pqm_debugfs_mqds(struct seq_file *m, void *data)
{
	struct process_queue_manager *pqm = data;
	struct process_queue_node *pqn;
	struct queue *q;
	enum KFD_MQD_TYPE mqd_type;
	struct mqd_manager *mqd_mgr;
	int r = 0, xcc, num_xccs = 1;
	void *mqd;
	uint64_t size = 0;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (pqn->q) {
			q = pqn->q;
			switch (q->properties.type) {
			case KFD_QUEUE_TYPE_SDMA:
			case KFD_QUEUE_TYPE_SDMA_XGMI:
				seq_printf(m, "  SDMA queue on device %x\n",
					   q->device->id);
				mqd_type = KFD_MQD_TYPE_SDMA;
				break;
			case KFD_QUEUE_TYPE_COMPUTE:
				seq_printf(m, "  Compute queue on device %x\n",
					   q->device->id);
				mqd_type = KFD_MQD_TYPE_CP;
				num_xccs = NUM_XCC(q->device->xcc_mask);
				break;
			default:
				seq_printf(m,
				"  Bad user queue type %d on device %x\n",
					   q->properties.type, q->device->id);
				continue;
			}
			mqd_mgr = q->device->dqm->mqd_mgrs[mqd_type];
			size = mqd_mgr->mqd_stride(mqd_mgr,
							&q->properties);
		} else if (pqn->kq) {
			q = pqn->kq->queue;
			mqd_mgr = pqn->kq->mqd_mgr;
			switch (q->properties.type) {
			case KFD_QUEUE_TYPE_DIQ:
				seq_printf(m, "  DIQ on device %x\n",
					   pqn->kq->dev->id);
				break;
			default:
				seq_printf(m,
				"  Bad kernel queue type %d on device %x\n",
					   q->properties.type,
					   pqn->kq->dev->id);
				continue;
			}
		} else {
			seq_puts(m,
		"  Weird: Queue node with neither kernel nor user queue\n");
			continue;
		}

		for (xcc = 0; xcc < num_xccs; xcc++) {
			mqd = q->mqd + size * xcc;
			r = mqd_mgr->debugfs_show_mqd(m, mqd);
			if (r != 0)
				break;
		}
	}

	return r;
}

#endif