// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/list.h>
#include "kfd_device_queue_manager.h"
#include "kfd_priv.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_reset.h"

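/*
 * get_queue_by_qid() - Look up the process queue node for a queue ID.
 *
 * Walks the PQM's queue list and matches @qid against both user queues
 * (pqn->q) and kernel queues (pqn->kq). Returns the matching node, or
 * NULL if no queue with that ID exists in this process.
 */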
static inline struct process_queue_node *get_queue_by_qid(
			struct process_queue_manager *pqm, unsigned int qid)
{
	struct process_queue_node *pqn;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if ((pqn->q && pqn->q->properties.queue_id == qid) ||
		    (pqn->kq && pqn->kq->queue->properties.queue_id == qid))
			return pqn;
	}

	return NULL;
}

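/*
 * assign_queue_slot_by_qid() - Reserve a specific queue ID in the bitmap.
 *
 * Used on the CRIU restore path, where a queue must come back with the
 * same ID it had at checkpoint time. Fails with -ENOSPC if the requested
 * slot is already taken.
 */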
static int assign_queue_slot_by_qid(struct process_queue_manager *pqm,
				    unsigned int qid)
{
	if (qid >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
		return -EINVAL;

	if (__test_and_set_bit(qid, pqm->queue_slot_bitmap)) {
		pr_err("Cannot create new queue because requested qid(%u) is in use\n", qid);
		return -ENOSPC;
	}

	return 0;
}

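/*
 * find_available_queue_slot() - Allocate the lowest free queue ID.
 *
 * Scans the per-process queue slot bitmap for the first zero bit, marks
 * it used and returns it through @qid. Returns -ENOMEM when all
 * KFD_MAX_NUM_OF_QUEUES_PER_PROCESS slots are taken.
 */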
static int find_available_queue_slot(struct process_queue_manager *pqm,
					unsigned int *qid)
{
	unsigned long found;

	found = find_first_zero_bit(pqm->queue_slot_bitmap,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	pr_debug("The new slot id %lu\n", found);

	if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
		pr_info("Cannot open more queues for process with pasid 0x%x\n",
				pqm->process->pasid);
		return -ENOMEM;
	}

	set_bit(found, pqm->queue_slot_bitmap);
	*qid = found;

	return 0;
}

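/*
 * kfd_process_dequeue_from_device() - Remove all of a process's queues
 * from one device.
 *
 * Idempotent: already_dequeued guards against running the termination
 * path twice for the same process-device pair.
 */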
void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
{
	struct kfd_node *dev = pdd->dev;

	if (pdd->already_dequeued)
		return;
	/* The MES context flush needs to filter out the case in which the
	 * KFD process was created without ever setting up the MES context
	 * and queue for creating a compute queue.
	 */
	dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
	if (dev->kfd->shared_resources.enable_mes && !!pdd->proc_ctx_gpu_addr &&
	    down_read_trylock(&dev->adev->reset_domain->sem)) {
		amdgpu_mes_flush_shader_debugger(dev->adev,
						 pdd->proc_ctx_gpu_addr);
		up_read(&dev->adev->reset_domain->sem);
	}
	pdd->already_dequeued = true;
}

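/**
 * pqm_set_gws - Assign or release the GWS resource for a queue
 * @pqm: process queue manager of the calling process
 * @qid: queue ID of the queue that gets (or loses) the GWS allocation
 * @gws: GWS buffer object to assign, or NULL to release
 *
 * Only one queue per process may hold GWS at a time. On devices that do
 * not use GWS hardware for global wave synchronization (or when MES is
 * enabled), pqn->q->gws is set to a dummy non-NULL marker for
 * cooperative groups instead of a real buffer.
 *
 * Return: 0 on success, negative error code on failure.
 */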
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
			void *gws)
{
	struct kfd_node *dev = NULL;
	struct process_queue_node *pqn;
	struct kfd_process_device *pdd;
	struct kgd_mem *mem = NULL;
	int ret;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_err("Queue id does not match any known queue\n");
		return -EINVAL;
	}

	if (pqn->q)
		dev = pqn->q->device;
	if (WARN_ON(!dev))
		return -ENODEV;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -EINVAL;
	}

	/* Only one queue per process can have GWS assigned */
	if (gws && pdd->qpd.num_gws)
		return -EBUSY;

	if (!gws && pdd->qpd.num_gws == 0)
		return -EINVAL;

	if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) && !dev->kfd->shared_resources.enable_mes) {
		if (gws)
			ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info,
				gws, &mem);
		else
			ret = amdgpu_amdkfd_remove_gws_from_process(pdd->process->kgd_process_info,
				pqn->q->gws);
		if (unlikely(ret))
			return ret;
		pqn->q->gws = mem;
	} else {
		/*
		 * Intentionally set GWS to a non-NULL value
		 * for devices that do not use GWS for global wave
		 * synchronization but require the formality
		 * of setting GWS for cooperative groups.
		 */
		pqn->q->gws = gws ? ERR_PTR(-ENOMEM) : NULL;
	}

	pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0;

	return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q, NULL);
}

void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		kfd_process_dequeue_from_device(p->pdds[i]);
}

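/*
 * pqm_init() - Set up the process queue manager for a new KFD process.
 *
 * Initializes the queue list and allocates the queue slot bitmap that
 * tracks which queue IDs are in use.
 */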
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
{
	INIT_LIST_HEAD(&pqm->queues);
	pqm->queue_slot_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
					       GFP_KERNEL);
	if (!pqm->queue_slot_bitmap)
		return -ENOMEM;
	pqm->process = p;

	return 0;
}

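/*
 * pqm_clean_queue_resource() - Release per-queue resources (GWS, MES
 * gang context, wptr BO) before the queue itself is freed.
 */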
static void pqm_clean_queue_resource(struct process_queue_manager *pqm,
				     struct process_queue_node *pqn)
{
	struct kfd_node *dev;
	struct kfd_process_device *pdd;

	dev = pqn->q->device;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return;
	}

	if (pqn->q->gws) {
		if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
		    !dev->kfd->shared_resources.enable_mes)
			amdgpu_amdkfd_remove_gws_from_process(
				pqm->process->kgd_process_info, pqn->q->gws);
		pdd->qpd.num_gws = 0;
	}

	if (dev->kfd->shared_resources.enable_mes) {
		amdgpu_amdkfd_free_gtt_mem(dev->adev, &pqn->q->gang_ctx_bo);
		if (pqn->q->wptr_bo)
			amdgpu_amdkfd_free_gtt_mem(dev->adev, (void **)&pqn->q->wptr_bo);
	}
}

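/*
 * pqm_uninit() - Tear down the process queue manager on process exit.
 *
 * Frees every remaining queue node (user queues also release their
 * GWS/MES resources first) and then the queue slot bitmap.
 */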
void pqm_uninit(struct process_queue_manager *pqm)
{
	struct process_queue_node *pqn, *next;

	list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
		if (pqn->q)
			pqm_clean_queue_resource(pqm, pqn);

		kfd_procfs_del_queue(pqn->q);
		uninit_queue(pqn->q);
		list_del(&pqn->process_queue_list);
		kfree(pqn);
	}

	bitmap_free(pqm->queue_slot_bitmap);
	pqm->queue_slot_bitmap = NULL;
}

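/*
 * init_user_queue() - Common initialization for user-mode queues.
 *
 * Fills in the queue properties owned by the kernel (VMID, queue ID,
 * doorbell pointer), allocates the queue structure, and, when MES is
 * enabled, allocates and zeroes the per-gang context buffer in GTT.
 */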
static int init_user_queue(struct process_queue_manager *pqm,
				struct kfd_node *dev, struct queue **q,
				struct queue_properties *q_properties,
				struct file *f, struct amdgpu_bo *wptr_bo,
				unsigned int qid)
{
	int retval;

	/* Doorbell initialized in user space */
	q_properties->doorbell_ptr = NULL;
	q_properties->exception_status = KFD_EC_MASK(EC_QUEUE_NEW);

	/* Let DQM handle it */
	q_properties->vmid = 0;
	q_properties->queue_id = qid;

	retval = init_queue(q, q_properties);
	if (retval != 0)
		return retval;

	(*q)->device = dev;
	(*q)->process = pqm->process;

	if (dev->kfd->shared_resources.enable_mes) {
		retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
						AMDGPU_MES_GANG_CTX_SIZE,
						&(*q)->gang_ctx_bo,
						&(*q)->gang_ctx_gpu_addr,
						&(*q)->gang_ctx_cpu_ptr,
						false);
		if (retval) {
			pr_err("failed to allocate gang context bo\n");
			goto cleanup;
		}
		memset((*q)->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);
		(*q)->wptr_bo = wptr_bo;
	}

	pr_debug("PQM After init queue\n");
	return 0;

cleanup:
	uninit_queue(*q);
	*q = NULL;
	return retval;
}

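/**
 * pqm_create_queue - Create a queue and register it with the DQM
 * @pqm: process queue manager of the calling process
 * @dev: KFD node the queue is created on
 * @f: optional file pointer passed through from the caller (NULL on the
 *	CRIU restore path)
 * @properties: requested queue properties
 * @qid: returns the allocated queue ID
 * @wptr_bo: write-pointer BO for MES-enabled devices
 * @q_data: CRIU private data, non-NULL only on the restore path
 * @restore_mqd: checkpointed MQD to restore, or NULL
 * @restore_ctl_stack: checkpointed control stack to restore, or NULL
 * @p_doorbell_offset_in_process: returns the doorbell byte offset within
 *	the process doorbell page, for user mode
 *
 * Handles compute, SDMA/SDMA-XGMI and DIQ queue types. Registers the
 * process with the device queue manager on its first queue and, on
 * MES-enabled devices, lazily allocates the per-device process context
 * buffer.
 *
 * Return: 0 on success, negative error code on failure.
 */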
int pqm_create_queue(struct process_queue_manager *pqm,
			    struct kfd_node *dev,
			    struct file *f,
			    struct queue_properties *properties,
			    unsigned int *qid,
			    struct amdgpu_bo *wptr_bo,
			    const struct kfd_criu_queue_priv_data *q_data,
			    const void *restore_mqd,
			    const void *restore_ctl_stack,
			    uint32_t *p_doorbell_offset_in_process)
{
	int retval;
	struct kfd_process_device *pdd;
	struct queue *q;
	struct process_queue_node *pqn;
	struct kernel_queue *kq;
	enum kfd_queue_type type = properties->type;
	unsigned int max_queues = 127; /* HWS limit */

	/*
	 * On GFX 9.4.3, increase the number of queues that can be created
	 * to 255. There is no HWS limit on GFX 9.4.3.
	 */
	if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3))
		max_queues = 255;

	q = NULL;
	kq = NULL;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -1;
	}

	/*
	 * For a debug process, verify that it is within the static queues
	 * limit; currently the limit is set to half of the total available
	 * HQD slots. If we are just about to create a DIQ, the is_debug
	 * flag is not set yet, hence we also check the queue type.
	 */
	if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ))
		max_queues = dev->kfd->device_info.max_no_of_hqd/2;

	if (pdd->qpd.queue_count >= max_queues)
		return -ENOSPC;

	if (q_data) {
		retval = assign_queue_slot_by_qid(pqm, q_data->q_id);
		*qid = q_data->q_id;
	} else
		retval = find_available_queue_slot(pqm, qid);

	if (retval != 0)
		return retval;

	/* Register process if this is the first queue */
	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);

	/* Allocate proc_ctx_bo only if MES is enabled and this is the first queue */
	if (!pdd->proc_ctx_cpu_ptr && dev->kfd->shared_resources.enable_mes) {
		retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
						     AMDGPU_MES_PROC_CTX_SIZE,
						     &pdd->proc_ctx_bo,
						     &pdd->proc_ctx_gpu_addr,
						     &pdd->proc_ctx_cpu_ptr,
						     false);
		if (retval) {
			dev_err(dev->adev->dev, "failed to allocate process context bo\n");
			return retval;
		}
		memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
	}

	pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);
	if (!pqn) {
		retval = -ENOMEM;
		goto err_allocate_pqn;
	}

	switch (type) {
	case KFD_QUEUE_TYPE_SDMA:
	case KFD_QUEUE_TYPE_SDMA_XGMI:
		/* SDMA queues are always allocated statically no matter
		 * which scheduler mode is used. We also do not need to
		 * check whether an SDMA queue can be allocated here, because
		 * allocate_sdma_queue() in create_queue() has the
		 * corresponding check logic.
		 */
		retval = init_user_queue(pqm, dev, &q, properties, f, wptr_bo, *qid);
		if (retval != 0)
			goto err_create_queue;
		pqn->q = q;
		pqn->kq = NULL;
		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
						    restore_mqd, restore_ctl_stack);
		print_queue(q);
		break;

	case KFD_QUEUE_TYPE_COMPUTE:
		/* Check if there is oversubscription */
		if ((dev->dqm->sched_policy ==
		     KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
		((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
		(dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) {
			pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
			retval = -EPERM;
			goto err_create_queue;
		}

		retval = init_user_queue(pqm, dev, &q, properties, f, wptr_bo, *qid);
		if (retval != 0)
			goto err_create_queue;
		pqn->q = q;
		pqn->kq = NULL;
		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
						    restore_mqd, restore_ctl_stack);
		print_queue(q);
		break;
	case KFD_QUEUE_TYPE_DIQ:
		kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
		if (!kq) {
			retval = -ENOMEM;
			goto err_create_queue;
		}
		kq->queue->properties.queue_id = *qid;
		pqn->kq = kq;
		pqn->q = NULL;
		retval = kfd_process_drain_interrupts(pdd);
		if (retval)
			break;

		retval = dev->dqm->ops.create_kernel_queue(dev->dqm,
							kq, &pdd->qpd);
		break;
	default:
		WARN(1, "Invalid queue type %d", type);
		retval = -EINVAL;
	}

	if (retval != 0) {
		pr_err("Pasid 0x%x DQM create queue type %d failed. ret %d\n",
			pqm->process->pasid, type, retval);
		goto err_create_queue;
	}

	if (q && p_doorbell_offset_in_process) {
		/* Return the doorbell offset (in bytes) within the doorbell
		 * page to the caller so it can be passed up to user mode.
		 * relative doorbell index = Absolute doorbell index -
		 * absolute index of first doorbell in the page.
		 */
		uint32_t first_db_index = amdgpu_doorbell_index_on_bar(pdd->dev->adev,
								       pdd->qpd.proc_doorbells,
								       0,
								       pdd->dev->kfd->device_info.doorbell_size);

		*p_doorbell_offset_in_process = (q->properties.doorbell_off
						- first_db_index) * sizeof(uint32_t);
	}

	pr_debug("PQM After DQM create queue\n");

	list_add(&pqn->process_queue_list, &pqm->queues);

	if (q) {
		pr_debug("PQM done creating queue\n");
		kfd_procfs_add_queue(q);
		print_queue_properties(&q->properties);
	}

	return retval;

err_create_queue:
	uninit_queue(q);
	if (kq)
		kernel_queue_uninit(kq, false);
	kfree(pqn);
err_allocate_pqn:
	/* If the queues list is empty, unregister the process from the device */
	clear_bit(*qid, pqm->queue_slot_bitmap);
	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);
	return retval;
}

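/*
 * pqm_destroy_queue() - Destroy a queue by ID and release its node.
 *
 * Handles both kernel queues (DIQ) and user queues. If the DQM fails
 * with -ETIME the queue resources are freed anyway; any other failure
 * leaves the node in place and is returned to the caller.
 */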
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
{
	struct process_queue_node *pqn;
	struct kfd_process_device *pdd;
	struct device_queue_manager *dqm;
	struct kfd_node *dev;
	int retval;

	dqm = NULL;
	retval = 0;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_err("Queue id does not match any known queue\n");
		return -EINVAL;
	}

	dev = NULL;
	if (pqn->kq)
		dev = pqn->kq->dev;
	if (pqn->q)
		dev = pqn->q->device;
	if (WARN_ON(!dev))
		return -ENODEV;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -1;
	}

	if (pqn->kq) {
		/* destroy kernel queue (DIQ) */
		dqm = pqn->kq->dev->dqm;
		dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
		kernel_queue_uninit(pqn->kq, false);
	}

	if (pqn->q) {
		kfd_procfs_del_queue(pqn->q);
		dqm = pqn->q->device->dqm;
		retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
		if (retval) {
			pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
				pqm->process->pasid,
				pqn->q->properties.queue_id, retval);
			if (retval != -ETIME)
				goto err_destroy_queue;
		}

		pqm_clean_queue_resource(pqm, pqn);
		uninit_queue(pqn->q);
	}

	list_del(&pqn->process_queue_list);
	kfree(pqn);
	clear_bit(qid, pqm->queue_slot_bitmap);

	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dqm->ops.unregister_process(dqm, &pdd->qpd);

err_destroy_queue:
	return retval;
}

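/*
 * pqm_update_queue_properties() - Apply new ring buffer, priority and
 * XCC-target settings to a queue and push them to the DQM.
 */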
int pqm_update_queue_properties(struct process_queue_manager *pqm,
				unsigned int qid, struct queue_properties *p)
{
	int retval;
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("No queue %d exists for update operation\n", qid);
		return -EFAULT;
	}

	pqn->q->properties.queue_address = p->queue_address;
	pqn->q->properties.queue_size = p->queue_size;
	pqn->q->properties.queue_percent = p->queue_percent;
	pqn->q->properties.priority = p->priority;
	pqn->q->properties.pm4_target_xcc = p->pm4_target_xcc;

	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q, NULL);
	if (retval != 0)
		return retval;

	return 0;
}

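/*
 * pqm_update_mqd() - Update MQD-level state such as the CU mask.
 *
 * Rejects user CU masks while the debugger workaround owns the mask.
 * On GFX10+ the shader arrays are built from WGPs (two CUs each), so a
 * user CU mask must enable CUs in adjacent pairs; enabling only one CU
 * of a pair is rejected.
 */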
int pqm_update_mqd(struct process_queue_manager *pqm,
				unsigned int qid, struct mqd_update_info *minfo)
{
	int retval;
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("No queue %d exists for update operation\n", qid);
		return -EFAULT;
	}

	/* CUs are masked for debugger requirements, so deny the user mask */
	if (pqn->q->properties.is_dbg_wa && minfo && minfo->cu_mask.ptr)
		return -EBUSY;

	/* ASICs that have WGPs must enforce pairwise enabled mask checks. */
	if (minfo && minfo->cu_mask.ptr &&
			KFD_GC_VERSION(pqn->q->device) >= IP_VERSION(10, 0, 0)) {
		int i;

		for (i = 0; i < minfo->cu_mask.count; i += 2) {
			uint32_t cu_pair = (minfo->cu_mask.ptr[i / 32] >> (i % 32)) & 0x3;

			if (cu_pair && cu_pair != 0x3) {
				pr_debug("CUs must be adjacent pairwise enabled.\n");
				return -EINVAL;
			}
		}
	}

	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q, minfo);
	if (retval != 0)
		return retval;

	if (minfo && minfo->cu_mask.ptr)
		pqn->q->properties.is_user_cu_masked = true;

	return 0;
}

struct kernel_queue *pqm_get_kernel_queue(
					struct process_queue_manager *pqm,
					unsigned int qid)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (pqn && pqn->kq)
		return pqn->kq;

	return NULL;
}

struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
					unsigned int qid)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	return pqn ? pqn->q : NULL;
}

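/*
 * pqm_get_wave_state() - Copy a user queue's control stack to user
 * space and report how much of the control stack and wave save area
 * is in use.
 */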
int pqm_get_wave_state(struct process_queue_manager *pqm,
		       unsigned int qid,
		       void __user *ctl_stack,
		       u32 *ctl_stack_used_size,
		       u32 *save_area_used_size)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n",
			 qid);
		return -EFAULT;
	}

	return pqn->q->device->dqm->ops.get_wave_state(pqn->q->device->dqm,
						       pqn->q,
						       ctl_stack,
						       ctl_stack_used_size,
						       save_area_used_size);
}

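/*
 * pqm_get_queue_snapshot() - Copy per-queue snapshot entries to user
 * space.
 *
 * *num_qss_entries returns the total number of user queues even when
 * the supplied buffer holds fewer entries, so the caller can detect a
 * truncated snapshot and retry with a larger buffer.
 */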
int pqm_get_queue_snapshot(struct process_queue_manager *pqm,
			   uint64_t exception_clear_mask,
			   void __user *buf,
			   int *num_qss_entries,
			   uint32_t *entry_size)
{
	struct process_queue_node *pqn;
	struct kfd_queue_snapshot_entry src;
	uint32_t tmp_entry_size = *entry_size, tmp_qss_entries = *num_qss_entries;
	int r = 0;

	*num_qss_entries = 0;
	if (!(*entry_size))
		return -EINVAL;

	*entry_size = min_t(size_t, *entry_size, sizeof(struct kfd_queue_snapshot_entry));
	mutex_lock(&pqm->process->event_mutex);

	memset(&src, 0, sizeof(src));

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (!pqn->q)
			continue;

		if (*num_qss_entries < tmp_qss_entries) {
			set_queue_snapshot_entry(pqn->q, exception_clear_mask, &src);

			if (copy_to_user(buf, &src, *entry_size)) {
				r = -EFAULT;
				break;
			}
			buf += tmp_entry_size;
		}
		*num_qss_entries += 1;
	}

	mutex_unlock(&pqm->process->event_mutex);
	return r;
}

static int get_queue_data_sizes(struct kfd_process_device *pdd,
				struct queue *q,
				uint32_t *mqd_size,
				uint32_t *ctl_stack_size)
{
	int ret;

	ret = pqm_get_queue_checkpoint_info(&pdd->process->pqm,
					    q->properties.queue_id,
					    mqd_size,
					    ctl_stack_size);
	if (ret)
		pr_err("Failed to get queue dump info (%d)\n", ret);

	return ret;
}

int kfd_process_get_queue_info(struct kfd_process *p,
			       uint32_t *num_queues,
			       uint64_t *priv_data_sizes)
{
	uint32_t extra_data_sizes = 0;
	struct queue *q;
	int i;
	int ret;

	*num_queues = 0;

	/* Run over all PDDs of the process */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		list_for_each_entry(q, &pdd->qpd.queues_list, list) {
			if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
				q->properties.type == KFD_QUEUE_TYPE_SDMA ||
				q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
				uint32_t mqd_size, ctl_stack_size;

				*num_queues = *num_queues + 1;

				ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
				if (ret)
					return ret;

				extra_data_sizes += mqd_size + ctl_stack_size;
			} else {
				pr_err("Unsupported queue type (%d)\n", q->properties.type);
				return -EOPNOTSUPP;
			}
		}
	}
	*priv_data_sizes = extra_data_sizes +
				(*num_queues * sizeof(struct kfd_criu_queue_priv_data));

	return 0;
}

static int pqm_checkpoint_mqd(struct process_queue_manager *pqm,
			      unsigned int qid,
			      void *mqd,
			      void *ctl_stack)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n", qid);
		return -EFAULT;
	}

	if (!pqn->q->device->dqm->ops.checkpoint_mqd) {
		pr_err("amdkfd: queue dumping not supported on this device\n");
		return -EOPNOTSUPP;
	}

	return pqn->q->device->dqm->ops.checkpoint_mqd(pqn->q->device->dqm,
						       pqn->q, mqd, ctl_stack);
}

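/*
 * criu_checkpoint_queue() - Serialize one queue into its CRIU private
 * data record.
 *
 * The record layout is: struct kfd_criu_queue_priv_data, immediately
 * followed by the MQD, then the control stack.
 */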
static int criu_checkpoint_queue(struct kfd_process_device *pdd,
			   struct queue *q,
			   struct kfd_criu_queue_priv_data *q_data)
{
	uint8_t *mqd, *ctl_stack;
	int ret;

	mqd = (void *)(q_data + 1);
	ctl_stack = mqd + q_data->mqd_size;

	q_data->gpu_id = pdd->user_gpu_id;
	q_data->type = q->properties.type;
	q_data->format = q->properties.format;
	q_data->q_id = q->properties.queue_id;
	q_data->q_address = q->properties.queue_address;
	q_data->q_size = q->properties.queue_size;
	q_data->priority = q->properties.priority;
	q_data->q_percent = q->properties.queue_percent;
	q_data->read_ptr_addr = (uint64_t)q->properties.read_ptr;
	q_data->write_ptr_addr = (uint64_t)q->properties.write_ptr;
	q_data->doorbell_id = q->doorbell_id;

	q_data->sdma_id = q->sdma_id;

	q_data->eop_ring_buffer_address =
		q->properties.eop_ring_buffer_address;

	q_data->eop_ring_buffer_size = q->properties.eop_ring_buffer_size;

	q_data->ctx_save_restore_area_address =
		q->properties.ctx_save_restore_area_address;

	q_data->ctx_save_restore_area_size =
		q->properties.ctx_save_restore_area_size;

	q_data->gws = !!q->gws;

	ret = pqm_checkpoint_mqd(&pdd->process->pqm, q->properties.queue_id, mqd, ctl_stack);
	if (ret) {
		pr_err("Failed checkpoint queue_mqd (%d)\n", ret);
		return ret;
	}

	pr_debug("Dumping Queue: gpu_id:%x queue_id:%u\n", q_data->gpu_id, q_data->q_id);
	return ret;
}

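/*
 * criu_checkpoint_queues_device() - Checkpoint all queues of one
 * process-device pair into the user-supplied CRIU buffer.
 *
 * A single kernel bounce buffer is reused across queues and grown on
 * demand, since record sizes vary with each queue's MQD and control
 * stack sizes.
 */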
static int criu_checkpoint_queues_device(struct kfd_process_device *pdd,
				   uint8_t __user *user_priv,
				   unsigned int *q_index,
				   uint64_t *queues_priv_data_offset)
{
	unsigned int q_private_data_size = 0;
	uint8_t *q_private_data = NULL; /* Local buffer to store individual queue private data */
	struct queue *q;
	int ret = 0;

	list_for_each_entry(q, &pdd->qpd.queues_list, list) {
		struct kfd_criu_queue_priv_data *q_data;
		uint64_t q_data_size;
		uint32_t mqd_size;
		uint32_t ctl_stack_size;

		if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE &&
			q->properties.type != KFD_QUEUE_TYPE_SDMA &&
			q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI) {

			pr_err("Unsupported queue type (%d)\n", q->properties.type);
			ret = -EOPNOTSUPP;
			break;
		}

		ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
		if (ret)
			break;

		q_data_size = sizeof(*q_data) + mqd_size + ctl_stack_size;

		/* Increase local buffer space if needed */
		if (q_private_data_size < q_data_size) {
			kfree(q_private_data);

			q_private_data = kzalloc(q_data_size, GFP_KERNEL);
			if (!q_private_data) {
				ret = -ENOMEM;
				break;
			}
			q_private_data_size = q_data_size;
		}

		q_data = (struct kfd_criu_queue_priv_data *)q_private_data;

		/* data stored in this order: priv_data, mqd, ctl_stack */
		q_data->mqd_size = mqd_size;
		q_data->ctl_stack_size = ctl_stack_size;

		ret = criu_checkpoint_queue(pdd, q, q_data);
		if (ret)
			break;

		q_data->object_type = KFD_CRIU_OBJECT_TYPE_QUEUE;

		ret = copy_to_user(user_priv + *queues_priv_data_offset,
				q_data, q_data_size);
		if (ret) {
			ret = -EFAULT;
			break;
		}
		*queues_priv_data_offset += q_data_size;
		*q_index = *q_index + 1;
	}

	kfree(q_private_data);

	return ret;
}

int kfd_criu_checkpoint_queues(struct kfd_process *p,
			 uint8_t __user *user_priv_data,
			 uint64_t *priv_data_offset)
{
	int ret = 0, pdd_index, q_index = 0;

	for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
		struct kfd_process_device *pdd = p->pdds[pdd_index];

		/*
		 * criu_checkpoint_queues_device will copy data to user and update q_index and
		 * queues_priv_data_offset
		 */
		ret = criu_checkpoint_queues_device(pdd, user_priv_data, &q_index,
					      priv_data_offset);

		if (ret)
			break;
	}

	return ret;
}

static void set_queue_properties_from_criu(struct queue_properties *qp,
					  struct kfd_criu_queue_priv_data *q_data)
{
	qp->is_interop = false;
	qp->queue_percent = q_data->q_percent;
	qp->priority = q_data->priority;
	qp->queue_address = q_data->q_address;
	qp->queue_size = q_data->q_size;
	qp->read_ptr = (uint32_t *) q_data->read_ptr_addr;
	qp->write_ptr = (uint32_t *) q_data->write_ptr_addr;
	qp->eop_ring_buffer_address = q_data->eop_ring_buffer_address;
	qp->eop_ring_buffer_size = q_data->eop_ring_buffer_size;
	qp->ctx_save_restore_area_address = q_data->ctx_save_restore_area_address;
	qp->ctx_save_restore_area_size = q_data->ctx_save_restore_area_size;
	qp->ctl_stack_size = q_data->ctl_stack_size;
	qp->type = q_data->type;
	qp->format = q_data->format;
}

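/*
 * kfd_criu_restore_queue() - Recreate one queue from CRIU private data.
 *
 * Reads the fixed-size record, then the variable-size MQD and control
 * stack, validating each read against max_priv_data_size before
 * recreating the queue under its original queue ID.
 */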
int kfd_criu_restore_queue(struct kfd_process *p,
			   uint8_t __user *user_priv_ptr,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size)
{
	uint8_t *mqd, *ctl_stack, *q_extra_data = NULL;
	struct kfd_criu_queue_priv_data *q_data;
	struct kfd_process_device *pdd;
	uint64_t q_extra_data_size;
	struct queue_properties qp;
	unsigned int queue_id;
	int ret = 0;

	if (*priv_data_offset + sizeof(*q_data) > max_priv_data_size)
		return -EINVAL;

	q_data = kmalloc(sizeof(*q_data), GFP_KERNEL);
	if (!q_data)
		return -ENOMEM;

	ret = copy_from_user(q_data, user_priv_ptr + *priv_data_offset, sizeof(*q_data));
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}

	*priv_data_offset += sizeof(*q_data);
	q_extra_data_size = (uint64_t)q_data->ctl_stack_size + q_data->mqd_size;

	if (*priv_data_offset + q_extra_data_size > max_priv_data_size) {
		ret = -EINVAL;
		goto exit;
	}

	q_extra_data = kmalloc(q_extra_data_size, GFP_KERNEL);
	if (!q_extra_data) {
		ret = -ENOMEM;
		goto exit;
	}

	ret = copy_from_user(q_extra_data, user_priv_ptr + *priv_data_offset, q_extra_data_size);
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}

	*priv_data_offset += q_extra_data_size;

	pdd = kfd_process_device_data_by_id(p, q_data->gpu_id);
	if (!pdd) {
		pr_err("Failed to get pdd\n");
		ret = -EINVAL;
		goto exit;
	}

	/* data stored in this order: mqd, ctl_stack */
	mqd = q_extra_data;
	ctl_stack = mqd + q_data->mqd_size;

	memset(&qp, 0, sizeof(qp));
	set_queue_properties_from_criu(&qp, q_data);

	print_queue_properties(&qp);

	ret = pqm_create_queue(&p->pqm, pdd->dev, NULL, &qp, &queue_id, NULL, q_data, mqd, ctl_stack,
				NULL);
	if (ret) {
		pr_err("Failed to create new queue err:%d\n", ret);
		goto exit;
	}

	if (q_data->gws)
		ret = pqm_set_gws(&p->pqm, q_data->q_id, pdd->dev->gws);

exit:
	if (ret)
		pr_err("Failed to restore queue (%d)\n", ret);
	else
		pr_debug("Queue id %d was restored successfully\n", queue_id);

	kfree(q_data);
	kfree(q_extra_data);

	return ret;
}

int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
				  unsigned int qid,
				  uint32_t *mqd_size,
				  uint32_t *ctl_stack_size)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n", qid);
		return -EFAULT;
	}

	if (!pqn->q->device->dqm->ops.get_queue_checkpoint_info) {
		pr_err("amdkfd: queue dumping not supported on this device\n");
		return -EOPNOTSUPP;
	}

	pqn->q->device->dqm->ops.get_queue_checkpoint_info(pqn->q->device->dqm,
						       pqn->q, mqd_size,
						       ctl_stack_size);
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

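/*
 * pqm_debugfs_mqds() - Dump the MQDs of all queues in a process.
 *
 * Compute queues on multi-XCC parts have one MQD per XCC, laid out
 * mqd_stride() bytes apart, so each XCC's copy is dumped in turn.
 */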
int pqm_debugfs_mqds(struct seq_file *m, void *data)
{
	struct process_queue_manager *pqm = data;
	struct process_queue_node *pqn;
	struct queue *q;
	enum KFD_MQD_TYPE mqd_type;
	struct mqd_manager *mqd_mgr;
	int r = 0, xcc, num_xccs = 1;
	void *mqd;
	uint64_t size = 0;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (pqn->q) {
			q = pqn->q;
			switch (q->properties.type) {
			case KFD_QUEUE_TYPE_SDMA:
			case KFD_QUEUE_TYPE_SDMA_XGMI:
				seq_printf(m, "  SDMA queue on device %x\n",
					   q->device->id);
				mqd_type = KFD_MQD_TYPE_SDMA;
				break;
			case KFD_QUEUE_TYPE_COMPUTE:
				seq_printf(m, "  Compute queue on device %x\n",
					   q->device->id);
				mqd_type = KFD_MQD_TYPE_CP;
				num_xccs = NUM_XCC(q->device->xcc_mask);
				break;
			default:
				seq_printf(m,
				"  Bad user queue type %d on device %x\n",
					   q->properties.type, q->device->id);
				continue;
			}
			mqd_mgr = q->device->dqm->mqd_mgrs[mqd_type];
			size = mqd_mgr->mqd_stride(mqd_mgr,
							&q->properties);
		} else if (pqn->kq) {
			q = pqn->kq->queue;
			mqd_mgr = pqn->kq->mqd_mgr;
			switch (q->properties.type) {
			case KFD_QUEUE_TYPE_DIQ:
				seq_printf(m, "  DIQ on device %x\n",
					   pqn->kq->dev->id);
				break;
			default:
				seq_printf(m,
				"  Bad kernel queue type %d on device %x\n",
					   q->properties.type,
					   pqn->kq->dev->id);
				continue;
			}
		} else {
			seq_printf(m,
		"  Weird: Queue node with neither kernel nor user queue\n");
			continue;
		}

		for (xcc = 0; xcc < num_xccs; xcc++) {
			mqd = q->mqd + size * xcc;
			r = mqd_mgr->debugfs_show_mqd(m, mqd);
			if (r != 0)
				break;
		}
	}

	return r;
}

#endif