xref: /openbmc/linux/drivers/gpu/drm/msm/msm_gem_vma.c (revision ca35ab2a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2016 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  */
6 
7 #include "msm_drv.h"
8 #include "msm_gem.h"
9 #include "msm_mmu.h"
10 
11 static void
12 msm_gem_address_space_destroy(struct kref *kref)
13 {
14 	struct msm_gem_address_space *aspace = container_of(kref,
15 			struct msm_gem_address_space, kref);
16 
17 	drm_mm_takedown(&aspace->mm);
18 	if (aspace->mmu)
19 		aspace->mmu->funcs->destroy(aspace->mmu);
20 	put_pid(aspace->pid);
21 	kfree(aspace);
22 }
23 
24 
25 void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
26 {
27 	if (aspace)
28 		kref_put(&aspace->kref, msm_gem_address_space_destroy);
29 }
30 
31 struct msm_gem_address_space *
32 msm_gem_address_space_get(struct msm_gem_address_space *aspace)
33 {
34 	if (!IS_ERR_OR_NULL(aspace))
35 		kref_get(&aspace->kref);
36 
37 	return aspace;
38 }
39 
40 bool msm_gem_vma_inuse(struct msm_gem_vma *vma)
41 {
42 	return !!vma->inuse;
43 }
44 
45 /* Actually unmap memory for the vma */
46 void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
47 		struct msm_gem_vma *vma)
48 {
49 	unsigned size = vma->node.size << PAGE_SHIFT;
50 
51 	/* Print a message if we try to purge a vma in use */
52 	if (GEM_WARN_ON(msm_gem_vma_inuse(vma)))
53 		return;
54 
55 	/* Don't do anything if the memory isn't mapped */
56 	if (!vma->mapped)
57 		return;
58 
59 	if (aspace->mmu)
60 		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
61 
62 	vma->mapped = false;
63 }
64 
65 /* Remove reference counts for the mapping */
66 void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
67 		struct msm_gem_vma *vma)
68 {
69 	if (!GEM_WARN_ON(!vma->iova))
70 		vma->inuse--;
71 }
72 
73 int
74 msm_gem_map_vma(struct msm_gem_address_space *aspace,
75 		struct msm_gem_vma *vma, int prot,
76 		struct sg_table *sgt, int npages)
77 {
78 	unsigned size = npages << PAGE_SHIFT;
79 	int ret = 0;
80 
81 	if (GEM_WARN_ON(!vma->iova))
82 		return -EINVAL;
83 
84 	/* Increase the usage counter */
85 	vma->inuse++;
86 
87 	if (vma->mapped)
88 		return 0;
89 
90 	vma->mapped = true;
91 
92 	if (aspace && aspace->mmu)
93 		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
94 				size, prot);
95 
96 	if (ret) {
97 		vma->mapped = false;
98 		vma->inuse--;
99 	}
100 
101 	return ret;
102 }
103 
/* Close an iova.  Warn if it is still in use */
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	/* Closing while still mapped or referenced indicates a caller bug. */
	if (GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped))
		return;

	/* vma->node is only valid while an iova is assigned; the lock
	 * serializes access to the drm_mm allocator. */
	spin_lock(&aspace->lock);
	if (vma->iova)
		drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	/* Drop the aspace reference taken in msm_gem_init_vma(). */
	msm_gem_address_space_put(aspace);
}
120 
/* Initialize a new vma and allocate an iova for it */
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int npages,
		u64 range_start, u64 range_end)
{
	int ret;

	/* A vma that already holds an iova must not be initialized again. */
	if (GEM_WARN_ON(vma->iova))
		return -EBUSY;

	/* The lock serializes access to the drm_mm allocator. */
	spin_lock(&aspace->lock);
	ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node, npages, 0,
		0, range_start, range_end, 0);
	spin_unlock(&aspace->lock);

	if (ret)
		return ret;

	/* drm_mm allocates in page units; convert back to a device address. */
	vma->iova = vma->node.start << PAGE_SHIFT;
	vma->mapped = false;

	/* The vma pins its address space; dropped in msm_gem_close_vma(). */
	kref_get(&aspace->kref);

	return 0;
}
146 
/*
 * Create a new address space backed by @mmu, managing iovas in the
 * range [va_start, va_start + size).  Returns an ERR_PTR on allocation
 * failure, or propagates @mmu if it is itself an ERR_PTR.
 */
struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
		u64 va_start, u64 size)
{
	struct msm_gem_address_space *aspace;

	/* Pass an MMU construction failure straight through to the caller. */
	if (IS_ERR(mmu))
		return ERR_CAST(mmu);

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->name = name;
	aspace->mmu = mmu;

	/* The drm_mm allocator tracks the range in pages, not bytes. */
	drm_mm_init(&aspace->mm, va_start >> PAGE_SHIFT, size >> PAGE_SHIFT);

	/* Initial reference, dropped via msm_gem_address_space_put(). */
	kref_init(&aspace->kref);

	return aspace;
}
170