// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_mmu.h"
/* Tear down the address space once the last reference is dropped: */
static void
msm_gem_address_space_destroy(struct kref *kref)
{
	struct msm_gem_address_space *aspace = container_of(kref,
			struct msm_gem_address_space, kref);

	drm_mm_takedown(&aspace->mm);
	if (aspace->mmu)
		aspace->mmu->funcs->destroy(aspace->mmu);
	put_pid(aspace->pid);
	kfree(aspace);
}

void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
	if (aspace)
		kref_put(&aspace->kref, msm_gem_address_space_destroy);
}

struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space *aspace)
{
	if (!IS_ERR_OR_NULL(aspace))
		kref_get(&aspace->kref);

	return aspace;
}
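
/*
 * Usage sketch (illustrative, not part of this file): _get is a no-op on
 * NULL/error pointers and _put is a no-op on NULL, so a caller holding a
 * possibly-NULL aspace pointer (the "ctx" below is hypothetical) can
 * simply bracket its use:
 *
 *	struct msm_gem_address_space *aspace =
 *		msm_gem_address_space_get(ctx->aspace);
 *	...
 *	msm_gem_address_space_put(aspace);
 */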

/*
 * Check whether a vma is still busy: it is while it holds at least one
 * pin (inuse) or while any fence recorded against it has not signaled.
 * Bits for fences that have already signaled are cleared as a side
 * effect, so later calls get cheaper.
 */
bool msm_gem_vma_inuse(struct msm_gem_vma *vma)
{
	if (vma->inuse > 0)
		return true;

	while (vma->fence_mask) {
		unsigned idx = ffs(vma->fence_mask) - 1;

		if (!msm_fence_completed(vma->fctx[idx], vma->fence[idx]))
			return true;

		vma->fence_mask &= ~BIT(idx);
	}

	return false;
}
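
/*
 * Example (hypothetical caller, shown for illustration): teardown and
 * eviction paths are expected to test this before unmapping, e.g.:
 *
 *	if (!msm_gem_vma_inuse(vma))
 *		msm_gem_purge_vma(aspace, vma);
 */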

/* Actually unmap memory for the vma */
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	unsigned size = vma->node.size;

	/* Print a message if we try to purge a vma in use */
	GEM_WARN_ON(msm_gem_vma_inuse(vma));

	/* Don't do anything if the memory isn't mapped */
	if (!vma->mapped)
		return;

	if (aspace->mmu)
		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);

	vma->mapped = false;
}

/* Remove reference counts for the mapping */
void msm_gem_unpin_vma(struct msm_gem_vma *vma)
{
	if (GEM_WARN_ON(!vma->inuse))
		return;
	if (!GEM_WARN_ON(!vma->iova))
		vma->inuse--;
}

/* Replace pin reference with fence: */
void msm_gem_unpin_vma_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx)
{
	/* Record the context's current last fence against this vma .. */
	vma->fctx[fctx->index] = fctx;
	vma->fence[fctx->index] = fctx->last_fence;
	vma->fence_mask |= BIT(fctx->index);
	/* .. and drop the pin it replaces: */
	msm_gem_unpin_vma(vma);
}
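
/*
 * Sketch of the intended handoff (the submit/ring names below are
 * assumptions about the caller, not defined in this file): when a job
 * retires, its pin is exchanged for the context's last fence:
 *
 *	msm_gem_unpin_vma_fenced(vma, submit->ring->fctx);
 *
 * msm_gem_vma_inuse() then keeps reporting the vma busy until that
 * fence has signaled.
 */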

/* Map and pin vma: */
int
msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int size)
{
	int ret = 0;

	if (GEM_WARN_ON(!vma->iova))
		return -EINVAL;

	/* Increase the usage counter */
	vma->inuse++;

	if (vma->mapped)
		return 0;

	vma->mapped = true;

	if (aspace && aspace->mmu)
		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
				size, prot);

	if (ret) {
		vma->mapped = false;
		vma->inuse--;
	}

	return ret;
}
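
/*
 * Minimal usage sketch (assumes the vma was set up with
 * msm_gem_init_vma() and that the caller holds a valid sg_table; the
 * prot flags are illustrative):
 *
 *	ret = msm_gem_map_vma(aspace, vma, IOMMU_READ | IOMMU_WRITE,
 *			      sgt, size);
 *	if (ret)
 *		return ret;
 *
 * Each successful call takes one inuse pin, dropped later via
 * msm_gem_unpin_vma() or msm_gem_unpin_vma_fenced().
 */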

/* Close an iova.  Warn if it is still in use */
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped);

	spin_lock(&aspace->lock);
	if (vma->iova)
		drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	msm_gem_address_space_put(aspace);
}

/* Initialize a new vma and allocate an iova for it */
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int size,
		u64 range_start, u64 range_end)
{
	int ret;

	if (GEM_WARN_ON(vma->iova))
		return -EBUSY;

	spin_lock(&aspace->lock);
	ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node,
					  size, PAGE_SIZE, 0,
					  range_start, range_end, 0);
	spin_unlock(&aspace->lock);

	if (ret)
		return ret;

	vma->iova = vma->node.start;
	vma->mapped = false;

	kref_get(&aspace->kref);

	return 0;
}
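
/*
 * Putting the pieces together, the expected vma lifecycle looks like
 * this (sketch only; error handling elided, prot/size/sgt assumed to
 * come from the caller):
 *
 *	msm_gem_init_vma(aspace, vma, size, 0, U64_MAX); // reserve iova
 *	msm_gem_map_vma(aspace, vma, prot, sgt, size);   // map + pin
 *	...
 *	msm_gem_unpin_vma(vma);                          // drop the pin
 *	msm_gem_purge_vma(aspace, vma);                  // unmap pages
 *	msm_gem_close_vma(aspace, vma);                  // release iova
 */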

struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
		u64 va_start, u64 size)
{
	struct msm_gem_address_space *aspace;

	if (IS_ERR(mmu))
		return ERR_CAST(mmu);

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->name = name;
	aspace->mmu = mmu;

	drm_mm_init(&aspace->mm, va_start, size);

	kref_init(&aspace->kref);

	return aspace;
}
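
/*
 * Construction sketch: a GPU backend pairs an MMU with a VA range, e.g.
 * (the range values are illustrative and "mmu" is assumed to come from
 * the backend's IOMMU setup):
 *
 *	aspace = msm_gem_address_space_create(mmu, "gpu",
 *			SZ_16M, 0xffffffff - SZ_16M);
 *	if (IS_ERR(aspace))
 *		return PTR_ERR(aspace);
 */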