// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Job
 *
 * Copyright (c) 2010-2015, NVIDIA Corporation.
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <trace/events/host1x.h>

#include "channel.h"
#include "dev.h"
#include "job.h"
#include "syncpt.h"

#define HOST1X_WAIT_SYNCPT_OFFSET 0x8

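/**
 * host1x_job_alloc() - allocate a host1x job and its per-entry metadata
 * @ch: host1x channel that the job will be submitted to
 * @num_cmdbufs: number of gathers (command buffers) the job may contain
 * @num_relocs: number of relocations that will need to be patched
 *
 * The job structure and all of its arrays (relocations, unpin data,
 * gathers and DMA addresses) are carved out of a single kzalloc()ed
 * block. Returns NULL if the combined size would overflow a size_t or
 * if the allocation fails.
 */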
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs)
{
	struct host1x_job *job = NULL;
	unsigned int num_unpins = num_relocs;
	u64 total;
	void *mem;

	if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
		num_unpins += num_cmdbufs;

	/* Check that we're not going to overflow */
	total = sizeof(struct host1x_job) +
		(u64)num_relocs * sizeof(struct host1x_reloc) +
		(u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
		(u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
		(u64)num_unpins * sizeof(dma_addr_t) +
		(u64)num_unpins * sizeof(u32 *);
	if (total > ULONG_MAX)
		return NULL;

	mem = job = kzalloc(total, GFP_KERNEL);
	if (!job)
		return NULL;

	kref_init(&job->ref);
	job->channel = ch;

	/* Redistribute memory to the structs */
	mem += sizeof(struct host1x_job);
	job->relocs = num_relocs ? mem : NULL;
	mem += num_relocs * sizeof(struct host1x_reloc);
	job->unpins = num_unpins ? mem : NULL;
	mem += num_unpins * sizeof(struct host1x_job_unpin_data);
	job->gathers = num_cmdbufs ? mem : NULL;
	mem += num_cmdbufs * sizeof(struct host1x_job_gather);
	job->addr_phys = num_unpins ? mem : NULL;

	job->reloc_addr_phys = job->addr_phys;
	job->gather_addr_phys = &job->addr_phys[num_relocs];

	return job;
}
EXPORT_SYMBOL(host1x_job_alloc);

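/**
 * host1x_job_get() - take a reference on a job
 * @job: job to take a reference on
 *
 * The reference is released with host1x_job_put(); the job is freed
 * once the last reference is dropped.
 */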
struct host1x_job *host1x_job_get(struct host1x_job *job)
{
	kref_get(&job->ref);
	return job;
}
EXPORT_SYMBOL(host1x_job_get);

static void job_free(struct kref *ref)
{
	struct host1x_job *job = container_of(ref, struct host1x_job, ref);

	kfree(job);
}

void host1x_job_put(struct host1x_job *job)
{
	kref_put(&job->ref, job_free);
}
EXPORT_SYMBOL(host1x_job_put);

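/**
 * host1x_job_add_gather() - append a gather to a job
 * @job: job to append the gather to
 * @bo: buffer object containing the commands
 * @words: number of 32-bit words in the command stream
 * @offset: offset of the command stream within the buffer object
 *
 * Note that no bounds checking is performed here, so the caller must not
 * add more gathers than the num_cmdbufs the job was allocated with.
 */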
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   unsigned int words, unsigned int offset)
{
	struct host1x_job_gather *gather = &job->gathers[job->num_gathers];

	gather->words = words;
	gather->bo = bo;
	gather->offset = offset;

	job->num_gathers++;
}
EXPORT_SYMBOL(host1x_job_add_gather);

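/*
 * Resolve and pin every buffer referenced by the job: first the relocation
 * targets, mapped for the client device, and then, unless the firewall will
 * copy them anyway, the gather buffers themselves, mapped for the host1x
 * device (through host->domain when an explicit IOMMU domain is used).
 * Each successful pin is recorded in job->unpins so that it can be undone
 * by host1x_job_unpin(), which also serves as the error path here.
 */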
static int pin_job(struct host1x *host, struct host1x_job *job)
{
	struct host1x_client *client = job->client;
	struct device *dev = client->dev;
	struct host1x_job_gather *g;
	struct iommu_domain *domain;
	unsigned int i;
	int err;

	domain = iommu_get_domain_for_dev(dev);
	job->num_unpins = 0;

	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocs[i];
		dma_addr_t phys_addr, *phys;
		struct sg_table *sgt;

		reloc->target.bo = host1x_bo_get(reloc->target.bo);
		if (!reloc->target.bo) {
			err = -EINVAL;
			goto unpin;
		}

		/*
		 * If the client device is not attached to an IOMMU, the
		 * physical address of the buffer object can be used.
		 *
		 * Similarly, when an IOMMU domain is shared between all
		 * host1x clients, the IOVA is already available, so no
		 * need to map the buffer object again.
		 *
		 * XXX Note that this isn't always safe to do because it
		 * relies on an assumption that no cache maintenance is
		 * needed on the buffer objects.
		 */
		if (!domain || client->group)
			phys = &phys_addr;
		else
			phys = NULL;

		sgt = host1x_bo_pin(dev, reloc->target.bo, phys);
		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto unpin;
		}

		if (sgt) {
			unsigned long mask = HOST1X_RELOC_READ |
					     HOST1X_RELOC_WRITE;
			enum dma_data_direction dir;

			switch (reloc->flags & mask) {
			case HOST1X_RELOC_READ:
				dir = DMA_TO_DEVICE;
				break;

			case HOST1X_RELOC_WRITE:
				dir = DMA_FROM_DEVICE;
				break;

			case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
				dir = DMA_BIDIRECTIONAL;
				break;

			default:
				err = -EINVAL;
				goto unpin;
			}

			err = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
			if (!err) {
				err = -ENOMEM;
				goto unpin;
			}

			job->unpins[job->num_unpins].dev = dev;
			job->unpins[job->num_unpins].dir = dir;
			phys_addr = sg_dma_address(sgt->sgl);
		}

		job->addr_phys[job->num_unpins] = phys_addr;
		job->unpins[job->num_unpins].bo = reloc->target.bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	/*
	 * The firewall will copy the contents of the gather BOs later, so
	 * there is no need to hold and pin them here.
	 */
	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
		return 0;

	for (i = 0; i < job->num_gathers; i++) {
		size_t gather_size = 0;
		struct scatterlist *sg;
		struct sg_table *sgt;
		dma_addr_t phys_addr;
		unsigned long shift;
		struct iova *alloc;
		dma_addr_t *phys;
		unsigned int j;

		g = &job->gathers[i];
		g->bo = host1x_bo_get(g->bo);
		if (!g->bo) {
			err = -EINVAL;
			goto unpin;
		}

		/*
		 * If the host1x is not attached to an IOMMU, there is no need
		 * to map the buffer object for the host1x, since the physical
		 * address can simply be used.
		 */
		if (!iommu_get_domain_for_dev(host->dev))
			phys = &phys_addr;
		else
			phys = NULL;

		sgt = host1x_bo_pin(host->dev, g->bo, phys);
		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto put;
		}

		if (host->domain) {
			for_each_sg(sgt->sgl, sg, sgt->nents, j)
				gather_size += sg->length;
			gather_size = iova_align(&host->iova, gather_size);

			shift = iova_shift(&host->iova);
			alloc = alloc_iova(&host->iova, gather_size >> shift,
					   host->iova_end >> shift, true);
			if (!alloc) {
				err = -ENOMEM;
				goto put;
			}

			err = iommu_map_sg(host->domain,
					iova_dma_addr(&host->iova, alloc),
					sgt->sgl, sgt->nents, IOMMU_READ);
			if (err == 0) {
				__free_iova(&host->iova, alloc);
				err = -EINVAL;
				goto put;
			}

			job->unpins[job->num_unpins].size = gather_size;
			phys_addr = iova_dma_addr(&host->iova, alloc);
		} else if (sgt) {
			err = dma_map_sg(host->dev, sgt->sgl, sgt->nents,
					 DMA_TO_DEVICE);
			if (!err) {
				err = -ENOMEM;
				goto put;
			}

			job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
			job->unpins[job->num_unpins].dev = host->dev;
			phys_addr = sg_dma_address(sgt->sgl);
		}

		job->addr_phys[job->num_unpins] = phys_addr;
		job->gather_addr_phys[i] = phys_addr;

		job->unpins[job->num_unpins].bo = g->bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	return 0;

put:
	host1x_bo_put(g->bo);
unpin:
	host1x_job_unpin(job);
	return err;
}

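/*
 * Patch the command stream of a single gather: every relocation that
 * targets this gather's buffer object gets the pinned DMA address of its
 * target (offset and shifted as requested) written into the command
 * buffer. With the firewall enabled, the patch is applied to the gather
 * copy rather than to the original buffer object.
 */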
static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
{
	void *cmdbuf_addr = NULL;
	struct host1x_bo *cmdbuf = g->bo;
	unsigned int i;

	/* pin & patch the relocs for one gather */
	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocs[i];
		u32 reloc_addr = (job->reloc_addr_phys[i] +
				  reloc->target.offset) >> reloc->shift;
		u32 *target;

		/* skip all other gathers */
		if (cmdbuf != reloc->cmdbuf.bo)
			continue;

		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
			target = (u32 *)job->gather_copy_mapped +
					reloc->cmdbuf.offset / sizeof(u32) +
						g->offset / sizeof(u32);
			goto patch_reloc;
		}

		if (!cmdbuf_addr) {
			cmdbuf_addr = host1x_bo_mmap(cmdbuf);

			if (unlikely(!cmdbuf_addr)) {
				pr_err("Could not map cmdbuf for relocation\n");
				return -ENOMEM;
			}
		}

		target = cmdbuf_addr + reloc->cmdbuf.offset;
patch_reloc:
		*target = reloc_addr;
	}

	if (cmdbuf_addr)
		host1x_bo_munmap(cmdbuf, cmdbuf_addr);

	return 0;
}

static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
			unsigned int offset)
{
	offset *= sizeof(u32);

	if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
		return false;

	/* relocation shift value validation isn't implemented yet */
	if (reloc->shift)
		return false;

	return true;
}

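/*
 * Parser state for the software firewall: the position within the gather
 * currently being inspected (cmdbuf, offset and remaining words), the
 * class, register, mask and count decoded from the most recent opcode,
 * and the relocations that still need to be matched up with address
 * registers.
 */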
struct host1x_firewall {
	struct host1x_job *job;
	struct device *dev;

	unsigned int num_relocs;
	struct host1x_reloc *reloc;

	struct host1x_bo *cmdbuf;
	unsigned int offset;

	u32 words;
	u32 class;
	u32 reg;
	u32 mask;
	u32 count;
};

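/*
 * A write to an address register must be covered by the next pending
 * relocation, which is consumed on a successful match. Jobs that do not
 * provide an is_addr_reg() callback are not checked.
 */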
static int check_register(struct host1x_firewall *fw, unsigned long offset)
{
	if (!fw->job->is_addr_reg)
		return 0;

	if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
		if (!fw->num_relocs)
			return -EINVAL;

		if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
			return -EINVAL;

		fw->num_relocs--;
		fw->reloc++;
	}

	return 0;
}

static int check_class(struct host1x_firewall *fw, u32 class)
{
	if (!fw->job->is_valid_class) {
		if (fw->class != class)
			return -EINVAL;
	} else {
		if (!fw->job->is_valid_class(fw->class))
			return -EINVAL;
	}

	return 0;
}

static int check_mask(struct host1x_firewall *fw)
{
	u32 mask = fw->mask;
	u32 reg = fw->reg;
	int ret;

	while (mask) {
		if (fw->words == 0)
			return -EINVAL;

		if (mask & 1) {
			ret = check_register(fw, reg);
			if (ret < 0)
				return ret;

			fw->words--;
			fw->offset++;
		}
		mask >>= 1;
		reg++;
	}

	return 0;
}

static int check_incr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	u32 reg = fw->reg;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, reg);
		if (ret < 0)
			return ret;

		reg++;
		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

static int check_nonincr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, fw->reg);
		if (ret < 0)
			return ret;

		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

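/*
 * Walk one gather's copy of the command stream, decoding each host1x
 * opcode and vetting every register write via check_register(). Opcodes
 * 4 and 14 carry no payload words and pass through unchecked; any
 * unrecognized opcode fails validation.
 */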
static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
{
	u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
		(g->offset / sizeof(u32));
	u32 job_class = fw->class;
	int err = 0;

	fw->words = g->words;
	fw->cmdbuf = g->bo;
	fw->offset = 0;

	while (fw->words && !err) {
		u32 word = cmdbuf_base[fw->offset];
		u32 opcode = (word & 0xf0000000) >> 28;

		fw->mask = 0;
		fw->reg = 0;
		fw->count = 0;
		fw->words--;
		fw->offset++;

		switch (opcode) {
		case 0:
			fw->class = word >> 6 & 0x3ff;
			fw->mask = word & 0x3f;
			fw->reg = word >> 16 & 0xfff;
			err = check_class(fw, job_class);
			if (!err)
				err = check_mask(fw);
			if (err)
				goto out;
			break;

		case 1:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_incr(fw);
			if (err)
				goto out;
			break;

		case 2:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_nonincr(fw);
			if (err)
				goto out;
			break;

		case 3:
			fw->mask = word & 0xffff;
			fw->reg = word >> 16 & 0xfff;
			err = check_mask(fw);
			if (err)
				goto out;
			break;

		case 4:
		case 14:
			break;

		default:
			err = -EINVAL;
			break;
		}
	}

out:
	return err;
}

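/*
 * With the firewall enabled, consolidate all gathers into a single
 * write-combined DMA allocation and validate the copies, so that the
 * commands that end up being executed are exactly the ones that passed
 * the firewall.
 */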
static inline int copy_gathers(struct device *host, struct host1x_job *job,
			       struct device *dev)
{
	struct host1x_firewall fw;
	size_t size = 0;
	size_t offset = 0;
	unsigned int i;

	fw.job = job;
	fw.dev = dev;
	fw.reloc = job->relocs;
	fw.num_relocs = job->num_relocs;
	fw.class = job->class;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		size += g->words * sizeof(u32);
	}

	/*
	 * Try a non-blocking allocation from the higher-priority pools
	 * first, since waiting for an allocation here is a major
	 * performance hit.
	 */
	job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
					       GFP_NOWAIT);

	/* the higher-priority allocation failed, fall back to a blocking one */
	if (!job->gather_copy_mapped)
		job->gather_copy_mapped = dma_alloc_wc(host, size,
						       &job->gather_copy,
						       GFP_KERNEL);
	if (!job->gather_copy_mapped)
		return -ENOMEM;

	job->gather_copy_size = size;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		void *gather;

		/* Copy the gather */
		gather = host1x_bo_mmap(g->bo);
		memcpy(job->gather_copy_mapped + offset, gather + g->offset,
		       g->words * sizeof(u32));
		host1x_bo_munmap(g->bo, gather);

		/* Store the location in the buffer */
		g->base = job->gather_copy;
		g->offset = offset;

		/* Validate the job */
		if (validate(&fw, g))
			return -EINVAL;

		offset += g->words * sizeof(u32);
	}

	/* No relocs should remain at this point */
	if (fw.num_relocs)
		return -EINVAL;

	return 0;
}

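/**
 * host1x_job_pin() - pin and patch a job in preparation for submission
 * @job: job to pin
 * @dev: client device on whose behalf the job is submitted
 *
 * Pins all buffers referenced by the job, copies and validates the
 * gathers if the firewall is enabled, resolves each gather's base
 * address (gathers sharing a buffer object are only processed once) and
 * patches in the relocations. On failure, everything pinned so far is
 * unpinned again.
 */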
int host1x_job_pin(struct host1x_job *job, struct device *dev)
{
	int err;
	unsigned int i, j;
	struct host1x *host = dev_get_drvdata(dev->parent);

	/* pin memory */
	err = pin_job(host, job);
	if (err)
		goto out;

	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
		err = copy_gathers(host->dev, job, dev);
		if (err)
			goto out;
	}

	/* patch gathers */
	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		/* process each gather BO only once */
		if (g->handled)
			continue;

		/* copy_gathers() sets the gather's base if the firewall is enabled */
		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
			g->base = job->gather_addr_phys[i];

		for (j = i + 1; j < job->num_gathers; j++) {
			if (job->gathers[j].bo == g->bo) {
				job->gathers[j].handled = true;
				job->gathers[j].base = g->base;
			}
		}

		err = do_relocs(job, g);
		if (err)
			break;
	}

out:
	if (err)
		host1x_job_unpin(job);
	wmb();

	return err;
}
EXPORT_SYMBOL(host1x_job_pin);

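/**
 * host1x_job_unpin() - release all resources pinned by host1x_job_pin()
 * @job: job to unpin
 *
 * Undoes the IOMMU and DMA mappings, buffer pins and references taken in
 * pin_job() and frees the firewall's gather copy if one was allocated.
 */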
void host1x_job_unpin(struct host1x_job *job)
{
	struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
	unsigned int i;

	for (i = 0; i < job->num_unpins; i++) {
		struct host1x_job_unpin_data *unpin = &job->unpins[i];
		struct device *dev = unpin->dev ?: host->dev;
		struct sg_table *sgt = unpin->sgt;

		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
		    unpin->size && host->domain) {
			iommu_unmap(host->domain, job->addr_phys[i],
				    unpin->size);
			free_iova(&host->iova,
				iova_pfn(&host->iova, job->addr_phys[i]));
		}

		if (unpin->dev && sgt)
			dma_unmap_sg(unpin->dev, sgt->sgl, sgt->nents,
				     unpin->dir);

		host1x_bo_unpin(dev, unpin->bo, sgt);
		host1x_bo_put(unpin->bo);
	}

	job->num_unpins = 0;

	if (job->gather_copy_size)
		dma_free_wc(host->dev, job->gather_copy_size,
			    job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);

/*
 * Debug routine used to dump job entries
 */
void host1x_job_dump(struct device *dev, struct host1x_job *job)
{
	dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt_id);
	dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
	dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
	dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
	dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
	dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
}