// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */

#include "msm_drv.h"
#include "msm_mmu.h"
#include "adreno/adreno_gpu.h"
#include "adreno/a2xx.xml.h"

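/*
 * The a2xx GPU has its own simple MMU (the "MH" register block) instead of
 * a system IOMMU: translation uses a single flat table of 32-bit entries,
 * one per 4K page, kept in contiguous DMA memory that the GPU walks
 * directly.
 */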
struct msm_gpummu {
	struct msm_mmu base;
	struct msm_gpu *gpu;
	dma_addr_t pt_base;
	uint32_t *table;
};
#define to_msm_gpummu(x) container_of(x, struct msm_gpummu, base)

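/*
 * The GPU-visible address space starts at 16M and spans 0xfff 64K chunks
 * (just under 256M).  With one 32-bit entry per 4K page that is
 * 0xfff * SZ_64K / SZ_4K = 65520 entries, so TABLE_SIZE works out to
 * 65520 * 4 = 262080 bytes, just under 256K.
 */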
#define GPUMMU_VA_START SZ_16M
#define GPUMMU_VA_RANGE (0xfff * SZ_64K)
#define GPUMMU_PAGE_SIZE SZ_4K
#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)

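/*
 * There is no external translation unit to acquire, so attach/detach are
 * no-ops; the table is programmed into the GPU itself (see
 * msm_gpummu_params() below).
 */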
static int msm_gpummu_attach(struct msm_mmu *mmu, const char * const *names,
		int cnt)
{
	return 0;
}

static void msm_gpummu_detach(struct msm_mmu *mmu, const char * const *names,
		int cnt)
{
}

static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, unsigned len, int prot)
{
	struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
	unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
	struct scatterlist *sg;
	unsigned prot_bits = 0;
	unsigned i, j;

	if (prot & IOMMU_WRITE)
		prot_bits |= 1;
	if (prot & IOMMU_READ)
		prot_bits |= 2;

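	/*
	 * Each entry is the 4K-aligned physical address of a page with the
	 * permission bits OR'd into the low bits (bit 0 = writable, bit 1 =
	 * readable, per the encoding above).  E.g. a 16K buffer mapped at
	 * GPUMMU_VA_START fills table[0..3].
	 */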
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		dma_addr_t addr = sg->dma_address;
		for (j = 0; j < sg->length / GPUMMU_PAGE_SIZE; j++, idx++) {
			gpummu->table[idx] = addr | prot_bits;
			addr += GPUMMU_PAGE_SIZE;
		}
	}

	/* this could be improved by deferring the flush across multiple map() calls */
	gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
		A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
		A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
	return 0;
}

static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
{
	struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
	unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
	unsigned i;

	for (i = 0; i < len / GPUMMU_PAGE_SIZE; i++, idx++)
		gpummu->table[idx] = 0;

	gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
		A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
		A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
	return 0;
}

static void msm_gpummu_destroy(struct msm_mmu *mmu)
{
	struct msm_gpummu *gpummu = to_msm_gpummu(mmu);

	dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,
		DMA_ATTR_FORCE_CONTIGUOUS);

	kfree(gpummu);
}

static const struct msm_mmu_funcs funcs = {
	.attach = msm_gpummu_attach,
	.detach = msm_gpummu_detach,
	.map = msm_gpummu_map,
	.unmap = msm_gpummu_unmap,
	.destroy = msm_gpummu_destroy,
};

struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu)
{
	struct msm_gpummu *gpummu;

	gpummu = kzalloc(sizeof(*gpummu), GFP_KERNEL);
	if (!gpummu)
		return ERR_PTR(-ENOMEM);

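	/*
	 * Allocate 32 bytes beyond the table itself: msm_gpummu_params()
	 * hands that tail out as the translation-error address, giving the
	 * hardware a safe scratch location to target on faulting accesses.
	 */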
	gpummu->table = dma_alloc_attrs(dev, TABLE_SIZE + 32, &gpummu->pt_base,
		GFP_KERNEL | __GFP_ZERO, DMA_ATTR_FORCE_CONTIGUOUS);
	if (!gpummu->table) {
		kfree(gpummu);
		return ERR_PTR(-ENOMEM);
	}

	gpummu->gpu = gpu;
	msm_mmu_init(&gpummu->base, dev, &funcs);

	return &gpummu->base;
}

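/*
 * Export the table and fault addresses so GPU setup code can program them
 * into the MH MMU registers at hw_init time, roughly (illustrative sketch,
 * not part of this file):
 *
 *	dma_addr_t pt_base, tran_error;
 *
 *	msm_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);
 *	gpu_write(gpu, REG_A2XX_MH_MMU_PT_BASE, pt_base);
 *	gpu_write(gpu, REG_A2XX_MH_MMU_TRAN_ERROR, tran_error);
 */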
void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
		dma_addr_t *tran_error)
{
	dma_addr_t base = to_msm_gpummu(mmu)->pt_base;

	*pt_base = base;
	*tran_error = base + TABLE_SIZE; /* 32-byte aligned */
}