/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"

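/*
 * Advance the runlist IB write pointer (counted in dwords) by
 * increment_bytes, warning if the new position would run past the end
 * of the IB allocation.
 */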
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
				unsigned int buffer_size_bytes)
{
	unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

	WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
	     "Runlist IB overflow");
	*wptr = temp;
}

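/*
 * Compute the runlist IB size in bytes from the current process and
 * queue counts, and report whether the runlist is over-subscribed
 * (more processes or compute queues than the HW scheduler can hold at
 * once). When over-subscribed, room for a chained runlist packet is
 * added to the allocation.
 */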
static void pm_calc_rlib_size(struct packet_manager *pm,
				unsigned int *rlib_size,
				bool *over_subscription)
{
	unsigned int process_count, queue_count, compute_queue_count;
	unsigned int map_queue_size;
	unsigned int max_proc_per_quantum = 1;
	struct kfd_dev *dev = pm->dqm->dev;

	process_count = pm->dqm->processes_count;
	queue_count = pm->dqm->queue_count;
	compute_queue_count = queue_count - pm->dqm->sdma_queue_count -
				pm->dqm->xgmi_sdma_queue_count;

	/* Check whether the runlist is over-subscribed.
	 * Note: the arbitration between the number of VMIDs and
	 * hws_max_conc_proc has already been done in
	 * kgd2kfd_device_init().
	 */
	*over_subscription = false;

	if (dev->max_proc_per_quantum > 1)
		max_proc_per_quantum = dev->max_proc_per_quantum;

	if ((process_count > max_proc_per_quantum) ||
	    compute_queue_count > get_queues_num(pm->dqm)) {
		*over_subscription = true;
		pr_debug("Over subscribed runlist\n");
	}

	map_queue_size = pm->pmf->map_queues_size;
	/* Calculate the runlist IB allocation size */
	*rlib_size = process_count * pm->pmf->map_process_size +
		     queue_count * map_queue_size;

	/*
	 * Increase the allocation size in case we need a chained run list
	 * when over-subscribed
	 */
	if (*over_subscription)
		*rlib_size += pm->pmf->runlist_size;

	pr_debug("runlist ib size %u\n", *rlib_size);
}

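/*
 * Allocate and zero a GTT sub-allocation for the runlist IB, sized by
 * pm_calc_rlib_size(), and return both its CPU and GPU addresses.
 * Returns -EINVAL if an IB is still allocated; pm_release_ib() must
 * free the previous one first.
 */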
static int pm_allocate_runlist_ib(struct packet_manager *pm,
				unsigned int **rl_buffer,
				uint64_t *rl_gpu_buffer,
				unsigned int *rl_buffer_size,
				bool *is_over_subscription)
{
	int retval;

	if (WARN_ON(pm->allocated))
		return -EINVAL;

	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

	mutex_lock(&pm->lock);

	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
					&pm->ib_buffer_obj);

	if (retval) {
		pr_err("Failed to allocate runlist IB\n");
		goto out;
	}

	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
	*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

	memset(*rl_buffer, 0, *rl_buffer_size);
	pm->allocated = true;

out:
	mutex_unlock(&pm->lock);
	return retval;
}

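/*
 * Build the runlist IB: one map_process packet per process, each
 * followed by map_queues packets for that process's active kernel and
 * user queues. When the runlist is over-subscribed, a chained runlist
 * packet pointing back at this IB is appended so the HW scheduler
 * cycles through it repeatedly.
 */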
static int pm_create_runlist_ib(struct packet_manager *pm,
				struct list_head *queues,
				uint64_t *rl_gpu_addr,
				size_t *rl_size_bytes)
{
	unsigned int alloc_size_bytes;
	unsigned int *rl_buffer, rl_wptr, i;
	int retval, processes_mapped;
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;
	struct kernel_queue *kq;
	bool is_over_subscription;

	rl_wptr = retval = processes_mapped = 0;

	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
				&alloc_size_bytes, &is_over_subscription);
	if (retval)
		return retval;

	*rl_size_bytes = alloc_size_bytes;
	pm->ib_size_bytes = alloc_size_bytes;

	pr_debug("Building runlist IB: process count %d, queue count %d\n",
		pm->dqm->processes_count, pm->dqm->queue_count);

	/* Build the runlist IB packets */
	list_for_each_entry(cur, queues, list) {
		qpd = cur->qpd;
		/* Build the map process packet */
		if (processes_mapped >= pm->dqm->processes_count) {
			pr_debug("Not enough space left in runlist IB\n");
			pm_release_ib(pm);
			return -ENOMEM;
		}

		retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd);
		if (retval)
			return retval;

		processes_mapped++;
		inc_wptr(&rl_wptr, pm->pmf->map_process_size,
				alloc_size_bytes);

		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
			if (!kq->queue->properties.is_active)
				continue;

			pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
				kq->queue->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						kq->queue,
						qpd->is_debug);
			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}

		list_for_each_entry(q, &qpd->queues_list, list) {
			if (!q->properties.is_active)
				continue;

			pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
				q->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						q,
						qpd->is_debug);

			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}
	}

	pr_debug("Finished mapping processes and queues to runlist\n");

	if (is_over_subscription)
		retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
					*rl_gpu_addr,
					alloc_size_bytes / sizeof(uint32_t),
					true);

	/* Dump the assembled runlist IB for debugging */
	for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
		pr_debug("0x%08X ", rl_buffer[i]);
	pr_debug("\n");

	return retval;
}

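/*
 * Bind the ASIC-specific packet writer functions and create the HIQ
 * kernel queue through which runtime packets are submitted to the HW
 * scheduler.
 */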
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
	switch (dqm->dev->device_info->asic_family) {
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		/* PM4 packet structures on CIK are the same as on VI */
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		pm->pmf = &kfd_vi_pm_funcs;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
		pm->pmf = &kfd_v9_pm_funcs;
		break;
	case CHIP_NAVI10:
		pm->pmf = &kfd_v10_pm_funcs;
		break;
	default:
		WARN(1, "Unexpected ASIC family %u",
		     dqm->dev->device_info->asic_family);
		return -EINVAL;
	}

	pm->dqm = dqm;
	mutex_init(&pm->lock);
	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
	if (!pm->priv_queue) {
		mutex_destroy(&pm->lock);
		return -ENOMEM;
	}
	pm->allocated = false;

	return 0;
}

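/* Tear down the packet manager: destroy its lock and the HIQ kernel queue. */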
void pm_uninit(struct packet_manager *pm)
{
	mutex_destroy(&pm->lock);
	kernel_queue_uninit(pm->priv_queue);
}

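/*
 * Write a set_resources packet into the HIQ, handing the scheduling
 * resources described by *res over to the HW scheduler.
 */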
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res)
{
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->set_resources_size;
	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
					size / sizeof(uint32_t),
					(unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->set_resources(pm, buffer, res);
	if (!retval)
		pm->priv_queue->ops.submit_packet(pm->priv_queue);
	else
		pm->priv_queue->ops.rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);

	return retval;
}

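/*
 * Build a runlist IB for all queues in dqm_queues and submit a runlist
 * packet on the HIQ that points the HW scheduler at it. On failure the
 * HIQ packet is rolled back and the IB is released.
 */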
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
	uint64_t rl_gpu_ib_addr;
	uint32_t *rl_buffer;
	size_t rl_ib_size, packet_size_dwords;
	int retval;

	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
					&rl_ib_size);
	if (retval)
		goto fail_create_runlist_ib;

	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

	packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
	mutex_lock(&pm->lock);

	retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
					packet_size_dwords, &rl_buffer);
	if (retval)
		goto fail_acquire_packet_buffer;

	retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
					rl_ib_size / sizeof(uint32_t), false);
	if (retval)
		goto fail_create_runlist;

	pm->priv_queue->ops.submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);

	return retval;

fail_create_runlist:
	pm->priv_queue->ops.rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
fail_create_runlist_ib:
	pm_release_ib(pm);
	return retval;
}

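/*
 * Submit a query_status packet that makes the HW scheduler write
 * fence_value to fence_address, so the driver can wait until the
 * scheduler has processed the preceding packets.
 */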
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			uint32_t fence_value)
{
	uint32_t *buffer, size;
	int retval = 0;

	if (WARN_ON(!fence_address))
		return -EFAULT;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
	if (!retval)
		pm->priv_queue->ops.submit_packet(pm->priv_queue);
	else
		pm->priv_queue->ops.rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

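/*
 * Submit an unmap_queues packet asking the HW scheduler to preempt
 * (or, with reset, forcibly reset) the queues selected by filter and
 * filter_param; sdma_engine selects the engine for SDMA queue types.
 */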
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_unmap_queues_filter filter,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine)
{
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->unmap_queues_size;
	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param,
				       reset, sdma_engine);
	if (!retval)
		pm->priv_queue->ops.submit_packet(pm->priv_queue);
	else
		pm->priv_queue->ops.rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

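/* Free the runlist IB allocation, if any, and mark it unallocated. */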
void pm_release_ib(struct packet_manager *pm)
{
	mutex_lock(&pm->lock);
	if (pm->allocated) {
		kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
		pm->allocated = false;
	}
	mutex_unlock(&pm->lock);
}

#if defined(CONFIG_DEBUG_FS)

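/* debugfs: hex-dump the currently allocated runlist IB, if any. */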
int pm_debugfs_runlist(struct seq_file *m, void *data)
{
	struct packet_manager *pm = data;

	mutex_lock(&pm->lock);

	if (!pm->allocated) {
		seq_puts(m, "  No active runlist\n");
		goto out;
	}

	seq_hex_dump(m, "  ", DUMP_PREFIX_OFFSET, 32, 4,
		     pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);

out:
	mutex_unlock(&pm->lock);
	return 0;
}

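/*
 * debugfs helper: deliberately submit a garbage (0x55-filled) packet
 * on the HIQ so the HW scheduler hangs, e.g. to exercise hang/reset
 * handling.
 */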
int pm_debugfs_hang_hws(struct packet_manager *pm)
{
	uint32_t *buffer, size;
	int r = 0;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		r = -ENOMEM;
		goto out;
	}
	memset(buffer, 0x55, size);
	pm->priv_queue->ops.submit_packet(pm->priv_queue);

	pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.\n",
		buffer[0], buffer[1], buffer[2], buffer[3],
		buffer[4], buffer[5], buffer[6]);
out:
	mutex_unlock(&pm->lock);
	return r;
}

#endif