// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_mmu.h"

/* Final teardown once the last reference to the address space is dropped */
static void
msm_gem_address_space_destroy(struct kref *kref)
{
	struct msm_gem_address_space *aspace = container_of(kref,
			struct msm_gem_address_space, kref);

	drm_mm_takedown(&aspace->mm);
	if (aspace->mmu)
		aspace->mmu->funcs->destroy(aspace->mmu);
	put_pid(aspace->pid);
	kfree(aspace);
}

/* Drop a reference, freeing the address space when the last one goes away */
void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
	if (aspace)
		kref_put(&aspace->kref, msm_gem_address_space_destroy);
}

/* Take a reference; tolerates NULL or an ERR_PTR for convenience */
struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space *aspace)
{
	if (!IS_ERR_OR_NULL(aspace))
		kref_get(&aspace->kref);

	return aspace;
}

/* Check if the vma is still pinned or covered by an unsignalled fence */
bool msm_gem_vma_inuse(struct msm_gem_vma *vma)
{
	if (vma->inuse > 0)
		return true;

	while (vma->fence_mask) {
		unsigned idx = ffs(vma->fence_mask) - 1;

		if (!msm_fence_completed(vma->fctx[idx], vma->fence[idx]))
			return true;

		vma->fence_mask &= ~BIT(idx);
	}

	return false;
}
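
/*
 * A minimal sketch of how the helpers in this file are combined at
 * teardown time: purge the mapping only once the vma is idle, then
 * release the iova.  The helper below is hypothetical and only
 * illustrates the expected call order; real callers live in msm_gem.c.
 */
static void __maybe_unused example_vma_teardown(struct msm_gem_vma *vma)
{
	/* Still pinned or fenced: not safe to tear down yet */
	if (msm_gem_vma_inuse(vma))
		return;

	/* Unmap the backing pages from the MMU */
	msm_gem_vma_purge(vma);

	/* Return the iova range to the address space allocator */
	msm_gem_vma_close(vma);
}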

/* Actually unmap memory for the vma */
void msm_gem_vma_purge(struct msm_gem_vma *vma)
{
	struct msm_gem_address_space *aspace = vma->aspace;
	unsigned size = vma->node.size;

	/* Print a message if we try to purge a vma in use */
	GEM_WARN_ON(msm_gem_vma_inuse(vma));

	/* Don't do anything if the memory isn't mapped */
	if (!vma->mapped)
		return;

	aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);

	vma->mapped = false;
}

/* Remove reference counts for the mapping */
void msm_gem_vma_unpin(struct msm_gem_vma *vma)
{
	if (GEM_WARN_ON(!vma->inuse))
		return;
	if (!GEM_WARN_ON(!vma->iova))
		vma->inuse--;
}

/* Replace pin reference with fence: */
void msm_gem_vma_unpin_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx)
{
	vma->fctx[fctx->index] = fctx;
	vma->fence[fctx->index] = fctx->last_fence;
	vma->fence_mask |= BIT(fctx->index);
	msm_gem_vma_unpin(vma);
}

/* Map and pin vma: */
int
msm_gem_vma_map(struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int size)
{
	struct msm_gem_address_space *aspace = vma->aspace;
	int ret;

	if (GEM_WARN_ON(!vma->iova))
		return -EINVAL;

	/* Increase the usage counter */
	vma->inuse++;

	if (vma->mapped)
		return 0;

	vma->mapped = true;

	if (!aspace)
		return 0;

	ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, size, prot);

	if (ret) {
		vma->mapped = false;
		vma->inuse--;
	}

	return ret;
}
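
/*
 * A minimal sketch of how the pin taken by msm_gem_vma_map() is later
 * handed over to a fence once the job using the mapping has been
 * queued.  The helper and its parameters are hypothetical; the real
 * flow is spread across msm_gem.c and msm_gem_submit.c.
 */
static int __maybe_unused example_map_for_submit(struct msm_gem_vma *vma,
		struct sg_table *sgt, int size, int prot,
		struct msm_fence_context *fctx)
{
	int ret;

	/* Map the backing pages; this also takes a pin (vma->inuse++) */
	ret = msm_gem_vma_map(vma, prot, sgt, size);
	if (ret)
		return ret;

	/* ... build and queue the submit that references vma->iova ... */

	/*
	 * Replace the explicit pin with the submit's fence: the vma
	 * counts as in-use until fctx->last_fence has signalled.
	 */
	msm_gem_vma_unpin_fenced(vma, fctx);

	return 0;
}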

/* Close an iova.  Warn if it is still in use */
void msm_gem_vma_close(struct msm_gem_vma *vma)
{
	struct msm_gem_address_space *aspace = vma->aspace;

	GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped);

	spin_lock(&aspace->lock);
	if (vma->iova)
		drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	msm_gem_address_space_put(aspace);
}

/* Allocate a new vma bound to the given address space */
struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return NULL;

	vma->aspace = aspace;

	return vma;
}

/* Initialize a new vma and allocate an iova for it */
int msm_gem_vma_init(struct msm_gem_vma *vma, int size,
		u64 range_start, u64 range_end)
{
	struct msm_gem_address_space *aspace = vma->aspace;
	int ret;

	if (GEM_WARN_ON(!aspace))
		return -EINVAL;

	if (GEM_WARN_ON(vma->iova))
		return -EBUSY;

	spin_lock(&aspace->lock);
	ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node,
					  size, PAGE_SIZE, 0,
					  range_start, range_end, 0);
	spin_unlock(&aspace->lock);

	if (ret)
		return ret;

	vma->iova = vma->node.start;
	vma->mapped = false;

	kref_get(&aspace->kref);

	return 0;
}
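
/*
 * A minimal sketch of allocating a vma and reserving an iova with the
 * two helpers above.  The helper and the 0..U64_MAX range are
 * illustrative defaults; callers such as msm_gem.c choose the range
 * based on the object's requirements.
 */
static __maybe_unused struct msm_gem_vma *
example_vma_alloc(struct msm_gem_address_space *aspace, int size)
{
	struct msm_gem_vma *vma;
	int ret;

	vma = msm_gem_vma_new(aspace);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	/* Reserve an iova anywhere inside the address space */
	ret = msm_gem_vma_init(vma, size, 0, U64_MAX);
	if (ret) {
		kfree(vma);
		return ERR_PTR(ret);
	}

	return vma;
}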

/*
 * Create an address space covering [va_start, va_start + size),
 * backed by the given MMU.
 */
struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
		u64 va_start, u64 size)
{
	struct msm_gem_address_space *aspace;

	if (IS_ERR(mmu))
		return ERR_CAST(mmu);

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->name = name;
	aspace->mmu = mmu;
	aspace->va_start = va_start;
	aspace->va_size  = size;

	drm_mm_init(&aspace->mm, va_start, size);

	kref_init(&aspace->kref);

	return aspace;
}
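
/*
 * A minimal sketch of creating an address space, assuming the caller
 * has already constructed an msm_mmu (for example via the IOMMU
 * backend).  The name and VA window below are placeholders; the GPU
 * code typically starts the window at 16MB and spans up to 4GB.
 */
static __maybe_unused struct msm_gem_address_space *
example_aspace_create(struct msm_mmu *mmu)
{
	/* create() copes with an ERR_PTR mmu, so no separate check is needed */
	return msm_gem_address_space_create(mmu, "example",
			0x1000000, 0x100000000ULL - 0x1000000);
}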