// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"

static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
				unsigned int buffer_size_bytes)
{
	unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

	WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
	     "Runlist IB overflow");
	*wptr = temp;
}

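/*
 * Worked example for inc_wptr() (hypothetical sizes): with *wptr == 0 and
 * increment_bytes == 16, *wptr becomes 4, i.e. the next packet is written
 * starting at rl_buffer[4]. The WARN only fires when the new write pointer,
 * converted back to bytes, would lie past buffer_size_bytes.
 */
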
static void pm_calc_rlib_size(struct packet_manager *pm,
				unsigned int *rlib_size,
				bool *over_subscription)
{
	unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
	unsigned int map_queue_size;
	unsigned int max_proc_per_quantum = 1;
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;

	process_count = pm->dqm->processes_count;
	queue_count = pm->dqm->active_queue_count;
	compute_queue_count = pm->dqm->active_cp_queue_count;
	gws_queue_count = pm->dqm->gws_queue_count;

	/* check if there is over subscription
	 * Note: the arbitration between the number of VMIDs and
	 * hws_max_conc_proc has been done in
	 * kgd2kfd_device_init().
	 */
	*over_subscription = false;

	if (node->max_proc_per_quantum > 1)
		max_proc_per_quantum = node->max_proc_per_quantum;

	if ((process_count > max_proc_per_quantum) ||
	    compute_queue_count > get_cp_queues_num(pm->dqm) ||
	    gws_queue_count > 1) {
		*over_subscription = true;
		dev_dbg(dev, "Over subscribed runlist\n");
	}

	map_queue_size = pm->pmf->map_queues_size;
	/* calculate run list ib allocation size */
	*rlib_size = process_count * pm->pmf->map_process_size +
		     queue_count * map_queue_size;

	/*
	 * Increase the allocation size in case we need a chained run list
	 * when over subscription
	 */
	if (*over_subscription)
		*rlib_size += pm->pmf->runlist_size;

	dev_dbg(dev, "runlist ib size %d\n", *rlib_size);
}

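/*
 * Sizing example for pm_calc_rlib_size() (hypothetical packet sizes, not
 * taken from any specific pmf table): with 2 processes, 8 active queues,
 * map_process_size == 60 bytes, map_queues_size == 64 bytes and
 * runlist_size == 20 bytes, an over-subscribed runlist IB needs
 * 2 * 60 + 8 * 64 + 20 == 652 bytes.
 */
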
static int pm_allocate_runlist_ib(struct packet_manager *pm,
				unsigned int **rl_buffer,
				uint64_t *rl_gpu_buffer,
				unsigned int *rl_buffer_size,
				bool *is_over_subscription)
{
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;
	int retval;

	if (WARN_ON(pm->allocated))
		return -EINVAL;

	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

	mutex_lock(&pm->lock);

	retval = kfd_gtt_sa_allocate(node, *rl_buffer_size, &pm->ib_buffer_obj);

	if (retval) {
		dev_err(dev, "Failed to allocate runlist IB\n");
		goto out;
	}

	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
	*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

	memset(*rl_buffer, 0, *rl_buffer_size);
	pm->allocated = true;

out:
	mutex_unlock(&pm->lock);
	return retval;
}

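/*
 * Note on pm_allocate_runlist_ib(): the GTT sub-allocator hands back one
 * buffer object with two views of the same memory, cpu_ptr (through which
 * the packets are written below) and gpu_addr (what the HWS fetches once a
 * runlist packet points at this IB). pm->allocated guards against a second
 * allocation while a runlist IB is still outstanding; pm_release_ib()
 * clears it.
 */
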
static int pm_create_runlist_ib(struct packet_manager *pm,
				struct list_head *queues,
				uint64_t *rl_gpu_addr,
				size_t *rl_size_bytes)
{
	unsigned int alloc_size_bytes;
	unsigned int *rl_buffer, rl_wptr, i;
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;
	int retval, processes_mapped;
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;
	struct kernel_queue *kq;
	bool is_over_subscription;

	rl_wptr = retval = processes_mapped = 0;

	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
				&alloc_size_bytes, &is_over_subscription);
	if (retval)
		return retval;

	*rl_size_bytes = alloc_size_bytes;
	pm->ib_size_bytes = alloc_size_bytes;

	dev_dbg(dev, "Building runlist ib process count: %d queues count %d\n",
		pm->dqm->processes_count, pm->dqm->active_queue_count);

	/* build the run list ib packet */
	list_for_each_entry(cur, queues, list) {
		qpd = cur->qpd;
		/* build map process packet */
		if (processes_mapped >= pm->dqm->processes_count) {
			dev_dbg(dev, "Not enough space left in runlist IB\n");
			pm_release_ib(pm);
			return -ENOMEM;
		}

		retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd);
		if (retval)
			return retval;

		processes_mapped++;
		inc_wptr(&rl_wptr, pm->pmf->map_process_size,
				alloc_size_bytes);

		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
			if (!kq->queue->properties.is_active)
				continue;

			dev_dbg(dev,
				"static_queue, mapping kernel q %d, is debug status %d\n",
				kq->queue->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						kq->queue,
						qpd->is_debug);
			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}

		list_for_each_entry(q, &qpd->queues_list, list) {
			if (!q->properties.is_active)
				continue;

			dev_dbg(dev,
				"static_queue, mapping user queue %d, is debug status %d\n",
				q->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						q,
						qpd->is_debug);

			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}
	}

	dev_dbg(dev, "Finished map process and queues to runlist\n");

	if (is_over_subscription) {
		if (!pm->is_over_subscription)
			dev_warn(
				dev,
				"Runlist is getting oversubscribed. Expect reduced ROCm performance.\n");
		retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
					*rl_gpu_addr,
					alloc_size_bytes / sizeof(uint32_t),
					true);
	}
	pm->is_over_subscription = is_over_subscription;

	for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
		pr_debug("0x%2X ", rl_buffer[i]);
	pr_debug("\n");

	return retval;
}

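/*
 * IB layout produced by pm_create_runlist_ib() (sketch): one map_process
 * packet per process, each followed by map_queues packets for that
 * process's active kernel and user queues; when over-subscribed, a trailing
 * chained runlist packet makes the HWS loop back to the start of this
 * same IB:
 *
 *	[map_process P0][map_queues Q0..Qn][map_process P1][...]
 *	[runlist -> *rl_gpu_addr, chain=true]	(over-subscription only)
 */
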
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
	switch (dqm->dev->adev->asic_type) {
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		/* PM4 packet structures on CIK are the same as on VI */
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		pm->pmf = &kfd_vi_pm_funcs;
		break;
	default:
		if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2) ||
		    KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3))
			pm->pmf = &kfd_aldebaran_pm_funcs;
		else if (KFD_GC_VERSION(dqm->dev) >= IP_VERSION(9, 0, 1))
			pm->pmf = &kfd_v9_pm_funcs;
		else {
			WARN(1, "Unexpected ASIC family %u",
			     dqm->dev->adev->asic_type);
			return -EINVAL;
		}
	}

	pm->dqm = dqm;
	mutex_init(&pm->lock);
	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
	if (!pm->priv_queue) {
		mutex_destroy(&pm->lock);
		return -ENOMEM;
	}
	pm->allocated = false;

	return 0;
}

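/*
 * Typical call site for pm_init()/pm_uninit() (sketch; the device queue
 * manager pairs these calls during HWS start/stop in
 * kfd_device_queue_manager.c):
 *
 *	retval = pm_init(&dqm->packet_mgr, dqm);
 *	if (retval)
 *		goto fail_packet_manager_init;
 *	...
 *	pm_uninit(&dqm->packet_mgr, false);	// teardown, HWS not hung
 */
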
void pm_uninit(struct packet_manager *pm, bool hanging)
{
	mutex_destroy(&pm->lock);
	kernel_queue_uninit(pm->priv_queue, hanging);
	pm->priv_queue = NULL;
}

int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res)
{
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->set_resources_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
					size / sizeof(uint32_t),
					(unsigned int **)&buffer);
	if (!buffer) {
		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->set_resources(pm, buffer, res);
	if (!retval)
		kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);

	return retval;
}

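/*
 * All HIQ submissions in this file follow the pattern shown above, under
 * pm->lock: kq_acquire_packet_buffer() reserves ring space for one packet,
 * the pmf callback formats the packet in place, then either
 * kq_submit_packet() publishes it by bumping the ring write pointer or
 * kq_rollback_packet() returns the reservation on failure.
 */
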
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
	uint64_t rl_gpu_ib_addr;
	uint32_t *rl_buffer;
	size_t rl_ib_size, packet_size_dwords;
	int retval;

	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
					&rl_ib_size);
	if (retval)
		goto fail_create_runlist_ib;

	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

	packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
	mutex_lock(&pm->lock);

	retval = kq_acquire_packet_buffer(pm->priv_queue,
					packet_size_dwords, &rl_buffer);
	if (retval)
		goto fail_acquire_packet_buffer;

	retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
					rl_ib_size / sizeof(uint32_t), false);
	if (retval)
		goto fail_create_runlist;

	kq_submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);

	return retval;

fail_create_runlist:
	kq_rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
fail_create_runlist_ib:
	pm_release_ib(pm);
	return retval;
}

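/*
 * Note the two uses of pmf->runlist(): pm_send_runlist() emits a
 * non-chained runlist packet on the HIQ that points the HWS at the freshly
 * built IB, while pm_create_runlist_ib() may append a chained copy inside
 * the IB itself so an over-subscribed runlist keeps cycling.
 */
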
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			uint64_t fence_value)
{
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;
	uint32_t *buffer, size;
	int retval = 0;

	if (WARN_ON(!fence_address))
		return -EFAULT;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
	if (!retval)
		kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

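/*
 * Fence handshake (sketch of how a caller is expected to use
 * pm_send_query_status()): the query_status packet asks the HWS to write
 * fence_value to fence_address once it has processed the preceding
 * packets, so the caller submits the packet and then polls *fence_address
 * until it reads fence_value or a timeout expires.
 */
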
int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
{
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;
	int retval = 0;
	uint32_t *buffer, size;

	size = pm->pmf->set_grace_period_size;

	mutex_lock(&pm->lock);

	if (size) {
		kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t),
			(unsigned int **)&buffer);

		if (!buffer) {
			dev_err(dev,
				"Failed to allocate buffer on kernel queue\n");
			retval = -ENOMEM;
			goto out;
		}

		retval = pm->pmf->set_grace_period(pm, buffer, grace_period);
		if (!retval)
			kq_submit_packet(pm->priv_queue);
		else
			kq_rollback_packet(pm->priv_queue);
	}

out:
	mutex_unlock(&pm->lock);
	return retval;
}

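/*
 * A set_grace_period_size of zero makes pm_update_grace_period() a silent
 * no-op, which is presumably how pmf tables opt out on ASICs whose packet
 * format has no grace-period packet; callers see retval == 0 either way.
 */
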
int pm_send_unmap_queue(struct packet_manager *pm,
			enum kfd_unmap_queues_filter filter,
			uint32_t filter_param, bool reset)
{
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->unmap_queues_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->unmap_queues(pm, buffer, filter, filter_param, reset);
	if (!retval)
		kq_submit_packet(pm->priv_queue);
	else
		kq_rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

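/*
 * Example invocation of pm_send_unmap_queue() (sketch; filter values come
 * from enum kfd_unmap_queues_filter in kfd_priv.h). Preempting, rather
 * than resetting, every queue belonging to one process would look like:
 *
 *	pm_send_unmap_queue(pm, KFD_UNMAP_QUEUES_FILTER_BY_PASID,
 *			    pasid, false);
 */
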
void pm_release_ib(struct packet_manager *pm)
{
	mutex_lock(&pm->lock);
	if (pm->allocated) {
		kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
		pm->allocated = false;
	}
	mutex_unlock(&pm->lock);
}

#if defined(CONFIG_DEBUG_FS)

int pm_debugfs_runlist(struct seq_file *m, void *data)
{
	struct packet_manager *pm = data;

	mutex_lock(&pm->lock);

	if (!pm->allocated) {
		seq_puts(m, "  No active runlist\n");
		goto out;
	}

	seq_hex_dump(m, "  ", DUMP_PREFIX_OFFSET, 32, 4,
		     pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);

out:
	mutex_unlock(&pm->lock);
	return 0;
}

int pm_debugfs_hang_hws(struct packet_manager *pm)
{
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;
	uint32_t *buffer, size;
	int r = 0;

	if (!pm->priv_queue)
		return -EAGAIN;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	kq_acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
		r = -ENOMEM;
		goto out;
	}
	memset(buffer, 0x55, size);
	kq_submit_packet(pm->priv_queue);

	dev_info(dev, "Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
		 buffer[0], buffer[1], buffer[2], buffer[3], buffer[4],
		 buffer[5], buffer[6]);
out:
	mutex_unlock(&pm->lock);
	return r;
}
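
/*
 * Filling a query_status-sized packet with 0x55 bytes feeds the HWS packet
 * parser a malformed PM4 stream, deliberately wedging the scheduler (as
 * the dev_info above says); this debugfs hook is fault injection for
 * exercising hang detection and recovery, not a recoverable operation.
 */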

#endif