/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"

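/*
 * inc_wptr() - advance the runlist IB write pointer.
 *
 * The write pointer is tracked in dwords while packet sizes are given in
 * bytes, hence the division by sizeof(uint32_t). As a hypothetical
 * illustration (the numbers are made up, not taken from any real packet
 * format): with *wptr == 4 and increment_bytes == 16, the pointer advances
 * four dwords to 8, and 8 * sizeof(uint32_t) == 32 bytes must still fit
 * within buffer_size_bytes or the WARN below fires.
 */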
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
				unsigned int buffer_size_bytes)
{
	unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

	WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
	     "Runlist IB overflow");
	*wptr = temp;
}

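/*
 * pm_calc_rlib_size() - compute the runlist IB allocation size.
 *
 * A sketch of the arithmetic with made-up packet sizes (the real values
 * come from the ASIC-specific pm->pmf table): with 2 processes, 6 queues,
 * map_process_size == 80 and map_queues_size == 64, the base size is
 * 2 * 80 + 6 * 64 == 544 bytes. When the list is over-subscribed, room
 * for one more runlist packet is reserved so a chained runlist can be
 * appended at the end of the IB.
 */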
static void pm_calc_rlib_size(struct packet_manager *pm,
				unsigned int *rlib_size,
				bool *over_subscription)
{
	unsigned int process_count, queue_count, compute_queue_count;
	unsigned int map_queue_size;
	unsigned int max_proc_per_quantum = 1;
	struct kfd_dev *dev = pm->dqm->dev;

	process_count = pm->dqm->processes_count;
	queue_count = pm->dqm->queue_count;
	compute_queue_count = queue_count - pm->dqm->sdma_queue_count;

	/* Check whether there is over-subscription.
	 * Note: the arbitration between the number of VMIDs and
	 * hws_max_conc_proc has been done in kgd2kfd_device_init().
	 */
	*over_subscription = false;

	if (dev->max_proc_per_quantum > 1)
		max_proc_per_quantum = dev->max_proc_per_quantum;

	if ((process_count > max_proc_per_quantum) ||
	    compute_queue_count > get_queues_num(pm->dqm)) {
		*over_subscription = true;
		pr_debug("Over subscribed runlist\n");
	}

	map_queue_size = pm->pmf->map_queues_size;
	/* calculate run list ib allocation size */
	*rlib_size = process_count * pm->pmf->map_process_size +
		     queue_count * map_queue_size;

	/*
	 * Increase the allocation size in case we need a chained run list
	 * when over-subscribed.
	 */
	if (*over_subscription)
		*rlib_size += pm->pmf->runlist_size;

	pr_debug("runlist ib size %d\n", *rlib_size);
}

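/*
 * pm_allocate_runlist_ib() - allocate and zero the runlist IB.
 *
 * The buffer comes from the device's GTT sub-allocator, which provides
 * both a CPU pointer (for building the packets) and a GPU address (for
 * the scheduler to fetch from). pm->allocated guards against leaking a
 * previous IB: only one runlist IB may be outstanding at a time, and
 * pm_release_ib() must free it before a new one is allocated.
 */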
static int pm_allocate_runlist_ib(struct packet_manager *pm,
				unsigned int **rl_buffer,
				uint64_t *rl_gpu_buffer,
				unsigned int *rl_buffer_size,
				bool *is_over_subscription)
{
	int retval;

	if (WARN_ON(pm->allocated))
		return -EINVAL;

	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

	mutex_lock(&pm->lock);

	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
					&pm->ib_buffer_obj);

	if (retval) {
		pr_err("Failed to allocate runlist IB\n");
		goto out;
	}

	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
	*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

	memset(*rl_buffer, 0, *rl_buffer_size);
	pm->allocated = true;

out:
	mutex_unlock(&pm->lock);
	return retval;
}

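/*
 * pm_create_runlist_ib() - build the runlist indirect buffer.
 *
 * For each process on the DQM list, the IB receives one map-process
 * packet followed by one map-queues packet per active kernel and user
 * queue. If the runlist is over-subscribed, a chained runlist packet
 * pointing back at this same IB is appended, so the scheduler keeps
 * cycling through the processes instead of stopping at the end.
 */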
static int pm_create_runlist_ib(struct packet_manager *pm,
				struct list_head *queues,
				uint64_t *rl_gpu_addr,
				size_t *rl_size_bytes)
{
	unsigned int alloc_size_bytes;
	unsigned int *rl_buffer, rl_wptr, i;
	int retval, processes_mapped;
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;
	struct kernel_queue *kq;
	bool is_over_subscription;

	rl_wptr = retval = processes_mapped = 0;

	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
				&alloc_size_bytes, &is_over_subscription);
	if (retval)
		return retval;

	*rl_size_bytes = alloc_size_bytes;
	pm->ib_size_bytes = alloc_size_bytes;

	pr_debug("Building runlist ib process count: %d queues count %d\n",
		pm->dqm->processes_count, pm->dqm->queue_count);

	/* build the run list ib packet */
	list_for_each_entry(cur, queues, list) {
		qpd = cur->qpd;
		/* build map process packet */
		if (processes_mapped >= pm->dqm->processes_count) {
			pr_debug("Not enough space left in runlist IB\n");
			pm_release_ib(pm);
			return -ENOMEM;
		}

		retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd);
		if (retval)
			return retval;

		processes_mapped++;
		inc_wptr(&rl_wptr, pm->pmf->map_process_size,
				alloc_size_bytes);

		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
			if (!kq->queue->properties.is_active)
				continue;

			pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
				kq->queue->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						kq->queue,
						qpd->is_debug);
			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}

		list_for_each_entry(q, &qpd->queues_list, list) {
			if (!q->properties.is_active)
				continue;

			pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
				q->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						q,
						qpd->is_debug);
			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				pm->pmf->map_queues_size,
				alloc_size_bytes);
		}
	}

	pr_debug("Finished map process and queues to runlist\n");

	if (is_over_subscription)
		retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
					*rl_gpu_addr,
					alloc_size_bytes / sizeof(uint32_t),
					true);

	for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
		pr_debug("0x%08X ", rl_buffer[i]);
	pr_debug("\n");

	return retval;
}

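/*
 * pm_init() - bind the packet manager to a device queue manager.
 *
 * Selects the ASIC-specific packet-writing functions (PM4 packet formats
 * differ between generations, though CIK reuses the VI layout) and
 * creates the HIQ kernel queue through which all scheduler packets are
 * submitted.
 */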
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
	switch (dqm->dev->device_info->asic_family) {
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		/* PM4 packet structures on CIK are the same as on VI */
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
		pm->pmf = &kfd_vi_pm_funcs;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
		pm->pmf = &kfd_v9_pm_funcs;
		break;
	default:
		WARN(1, "Unexpected ASIC family %u",
		     dqm->dev->device_info->asic_family);
		return -EINVAL;
	}

	pm->dqm = dqm;
	mutex_init(&pm->lock);
	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
	if (!pm->priv_queue) {
		mutex_destroy(&pm->lock);
		return -ENOMEM;
	}
	pm->allocated = false;

	return 0;
}

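/*
 * pm_uninit() - tear down the packet manager.
 *
 * Callers are expected to guarantee that no packet submission is still
 * in flight at this point, since the lock is destroyed before the HIQ
 * kernel queue is released.
 */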
void pm_uninit(struct packet_manager *pm)
{
	mutex_destroy(&pm->lock);
	kernel_queue_uninit(pm->priv_queue);
}

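/*
 * The pm_send_*() helpers below share one pattern: under pm->lock,
 * reserve space on the HIQ kernel queue, let the ASIC-specific pmf
 * callback fill in the packet, then either submit it on success or
 * roll the queue's write pointer back on failure.
 */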
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res)
{
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->set_resources_size;
	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
					size / sizeof(uint32_t),
					(unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->set_resources(pm, buffer, res);
	if (!retval)
		pm->priv_queue->ops.submit_packet(pm->priv_queue);
	else
		pm->priv_queue->ops.rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);

	return retval;
}

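/*
 * pm_send_runlist() - hand a runlist to the hardware scheduler.
 *
 * This is a two-step operation: first build the runlist IB in GTT memory
 * (pm_create_runlist_ib()), then submit a single runlist packet on the
 * HIQ whose payload is the IB's GPU address and size in dwords. Note the
 * unwind order in the error paths: a packet acquired but not submitted
 * is rolled back, and the IB is released in every failure case.
 */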
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
	uint64_t rl_gpu_ib_addr;
	uint32_t *rl_buffer;
	size_t rl_ib_size, packet_size_dwords;
	int retval;

	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
					&rl_ib_size);
	if (retval)
		goto fail_create_runlist_ib;

	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

	packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
	mutex_lock(&pm->lock);

	retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
					packet_size_dwords, &rl_buffer);
	if (retval)
		goto fail_acquire_packet_buffer;

	retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
					rl_ib_size / sizeof(uint32_t), false);
	if (retval)
		goto fail_create_runlist;

	pm->priv_queue->ops.submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);

	return retval;

fail_create_runlist:
	pm->priv_queue->ops.rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
fail_create_runlist_ib:
	pm_release_ib(pm);
	return retval;
}

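/*
 * pm_send_query_status() - ask the scheduler to signal a fence.
 *
 * The query-status packet instructs the scheduler to write fence_value
 * to fence_address once it has processed the work ahead of the packet.
 * The caller can then poll that address to learn when preceding
 * operations (typically an unmap) have completed.
 */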
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			uint32_t fence_value)
{
	uint32_t *buffer, size;
	int retval = 0;

	if (WARN_ON(!fence_address))
		return -EFAULT;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
	if (!retval)
		pm->priv_queue->ops.submit_packet(pm->priv_queue);
	else
		pm->priv_queue->ops.rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

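/*
 * pm_send_unmap_queue() - ask the scheduler to unmap queues.
 *
 * The filter selects which queues of the given type to unmap (a single
 * queue, all queues of one process, or all queues), filter_param narrows
 * the selection (e.g. a PASID), reset requests a hard queue reset rather
 * than a graceful preemption, and sdma_engine identifies the engine for
 * SDMA queue types.
 */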
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_unmap_queues_filter filter,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine)
{
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->unmap_queues_size;
	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param,
				       reset, sdma_engine);
	if (!retval)
		pm->priv_queue->ops.submit_packet(pm->priv_queue);
	else
		pm->priv_queue->ops.rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

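/*
 * pm_release_ib() - free the runlist IB, if one is allocated.
 *
 * Safe to call unconditionally on error paths: the pm->allocated flag
 * makes the release idempotent.
 */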
void pm_release_ib(struct packet_manager *pm)
{
	mutex_lock(&pm->lock);
	if (pm->allocated) {
		kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
		pm->allocated = false;
	}
	mutex_unlock(&pm->lock);
}

#if defined(CONFIG_DEBUG_FS)

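/* Dump the current runlist IB as a hex blob through debugfs. */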
int pm_debugfs_runlist(struct seq_file *m, void *data)
{
	struct packet_manager *pm = data;

	mutex_lock(&pm->lock);

	if (!pm->allocated) {
		seq_puts(m, "  No active runlist\n");
		goto out;
	}

	seq_hex_dump(m, "  ", DUMP_PREFIX_OFFSET, 32, 4,
		     pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);

out:
	mutex_unlock(&pm->lock);
	return 0;
}

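/*
 * pm_debugfs_hang_hws() - deliberately wedge the hardware scheduler.
 *
 * Fills a packet-sized slot on the HIQ with the 0x55 byte pattern, which
 * is not a valid PM4 packet, and submits it. The scheduler stops
 * consuming the queue, which is useful for exercising the driver's
 * GPU-reset path from debugfs.
 */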
int pm_debugfs_hang_hws(struct packet_manager *pm)
{
	uint32_t *buffer, size;
	int r = 0;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		r = -ENOMEM;
		goto out;
	}
	memset(buffer, 0x55, size);
	pm->priv_queue->ops.submit_packet(pm->priv_queue);

	pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.\n",
		buffer[0], buffer[1], buffer[2], buffer[3],
		buffer[4], buffer[5], buffer[6]);
out:
	mutex_unlock(&pm->lock);
	return r;
}

#endif