/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright (c) 2012 David Airlie <airlied@linux.ie>
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/**
 * DOC: vma offset manager
 *
 * The vma-manager is responsible for mapping arbitrary driver-dependent
 * memory regions into the linear user address-space. It provides offsets to
 * the caller which can then be used on the address_space of the drm-device.
 * It takes care not to overlap regions, to size them appropriately, and not
 * to confuse mm-core with inconsistent fake vm_pgoff fields.
 * Drivers shouldn't use this for object placement in VMEM. This manager should
 * only be used to manage mappings into linear user-space VMs.
 *
 * We use drm_mm as the backend to manage object allocations. But it is highly
 * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
 * speed up offset lookups.
 *
 * You must not use multiple offset managers on a single address_space.
 * Otherwise, mm-core will be unable to tear down memory mappings as the VM
 * will no longer be linear. Please use VM_NONLINEAR in that case and implement
 * your own offset managers.
 *
 * This offset manager works on page-based addresses. That is, every argument
 * and return code (with the exception of drm_vma_node_offset_addr()) is given
 * in number of pages, not number of bytes. This means object sizes and offsets
 * must always be page-aligned (as usual).
 * If you want to get a valid byte-based user-space address for a given offset,
 * please see drm_vma_node_offset_addr().
 *
 * In addition to offset management, the vma offset manager also handles access
 * management. For every open-file context that is allowed to access a given
 * node, you must call drm_vma_node_allow(). Otherwise, an mmap() call on this
 * open-file with the offset of the node will fail with -EACCES. To revoke
 * access again, use drm_vma_node_revoke(). However, the caller is responsible
 * for destroying already existing mappings, if required.
 */
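
/*
 * A minimal usage sketch (illustrative only; struct my_obj and its fields
 * are hypothetical, not part of this API): a driver embeds a
 * struct drm_vma_offset_node in its buffer object, adds it to a manager and
 * hands the resulting byte offset to user-space for mmap():
 *
 *	struct my_obj {
 *		struct drm_vma_offset_node vma_node;
 *		unsigned long num_pages;
 *		__u64 map_offset;
 *	};
 *
 *	static int my_obj_setup_mmap(struct drm_vma_offset_manager *mgr,
 *				     struct my_obj *obj)
 *	{
 *		int ret = drm_vma_offset_add(mgr, &obj->vma_node,
 *					     obj->num_pages);
 *		if (ret)
 *			return ret;
 *
 *		obj->map_offset = drm_vma_node_offset_addr(&obj->vma_node);
 *		return 0;
 *	}
 */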

/**
 * drm_vma_offset_manager_init - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction is left
 * for the caller. While calling into the vma-manager, a given node must
 * always be guaranteed to be referenced.
 */
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size)
{
	rwlock_init(&mgr->vm_lock);
	mgr->vm_addr_space_rb = RB_ROOT;
	drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);
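
/*
 * Illustrative sketch (the start and size values here are examples only, not
 * mandated by this API): both arguments are page-based, so a manager that
 * hands out offsets in a 4GiB window starting at byte offset 4GiB would be
 * initialized as:
 *
 *	drm_vma_offset_manager_init(&mgr, 0x100000000ULL >> PAGE_SHIFT,
 *				    0x100000000ULL >> PAGE_SHIFT);
 */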

/**
 * drm_vma_offset_manager_destroy() - Destroy offset manager
 * @mgr: Manager object
 *
 * Destroy an offset manager which was previously created via
 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
 * before destroying the manager. Otherwise, drm_mm will refuse to free the
 * requested resources.
 *
 * The manager must not be accessed after this function is called.
 */
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
	/* take the lock to protect against buggy drivers */
	write_lock(&mgr->vm_lock);
	drm_mm_takedown(&mgr->vm_addr_space_mm);
	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);

/**
 * drm_vma_offset_lookup() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Find a node given a start address and object size. This returns the _best_
 * match for the given range. That is, @start may point somewhere into a valid
 * region and the given node will be returned, as long as the node spans the
 * whole requested area (given the size in number of pages as @pages).
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned. It's the caller's responsibility to make sure the node doesn't
 * get destroyed before the caller can access it.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
						  unsigned long start,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;

	read_lock(&mgr->vm_lock);
	node = drm_vma_offset_lookup_locked(mgr, start, pages);
	read_unlock(&mgr->vm_lock);

	return node;
}
EXPORT_SYMBOL(drm_vma_offset_lookup);
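
/*
 * Illustrative sketch (hypothetical driver mmap path; mgr is assumed to be
 * reachable from the file's driver-private data): translate the fake offset
 * that user-space passed to mmap() back into the backing node:
 *
 *	node = drm_vma_offset_lookup(mgr, vma->vm_pgoff, vma_pages(vma));
 *	if (!node)
 *		return -EINVAL;
 */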

/**
 * drm_vma_offset_lookup_locked() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
 * manually. See drm_vma_offset_lock_lookup() for an example.
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
							 unsigned long start,
							 unsigned long pages)
{
	struct drm_vma_offset_node *node, *best;
	struct rb_node *iter;
	unsigned long offset;

	iter = mgr->vm_addr_space_rb.rb_node;
	best = NULL;

	while (likely(iter)) {
		node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
		offset = node->vm_node.start;
		if (start >= offset) {
			iter = iter->rb_right;
			best = node;
			if (start == offset)
				break;
		} else {
			iter = iter->rb_left;
		}
	}

	/* verify that the node spans the requested area */
	if (best) {
		offset = best->vm_node.start + best->vm_node.size;
		if (offset < start + pages)
			best = NULL;
	}

	return best;
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
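
/*
 * Illustrative locking sketch (struct my_obj and its kref are hypothetical):
 * the caller must hold the lookup lock so the node cannot be destroyed
 * between the lookup and taking a reference on the containing object:
 *
 *	drm_vma_offset_lock_lookup(mgr);
 *	node = drm_vma_offset_lookup_locked(mgr, start, pages);
 *	if (node)
 *		kref_get_unless_zero(&container_of(node, struct my_obj,
 *						   vma_node)->ref);
 *	drm_vma_offset_unlock_lookup(mgr);
 */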

/* internal helper to link @node into the rb-tree */
static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
				   struct drm_vma_offset_node *node)
{
	struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_node *iter_node;

	while (likely(*iter)) {
		parent = *iter;
		iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);

		if (node->vm_node.start < iter_node->vm_node.start)
			iter = &(*iter)->rb_left;
		else if (node->vm_node.start > iter_node->vm_node.start)
			iter = &(*iter)->rb_right;
		else
			BUG();
	}

	rb_link_node(&node->vm_rb, parent, iter);
	rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
}

/**
 * drm_vma_offset_add() - Add offset node to manager
 * @mgr: Manager object
 * @node: Node to be added
 * @pages: Allocation size visible to user-space (in number of pages)
 *
 * Add a node to the offset-manager. If the node was already added, this does
 * nothing and returns 0. @pages is the size of the object given in number of
 * pages.
 * After this call succeeds, you can access the offset of the node until it
 * is removed again.
 *
 * If this call fails, it is safe to retry the operation or to call
 * drm_vma_offset_remove(). No cleanup is required in that case.
 *
 * @pages is not required to be the same size as the underlying memory object
 * that you want to map. It only limits the size that user-space can map into
 * their address space.
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
		       struct drm_vma_offset_node *node, unsigned long pages)
{
	int ret;

	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		ret = 0;
		goto out_unlock;
	}

	ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
				 pages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret)
		goto out_unlock;

	_drm_vma_offset_add_rb(mgr, node);

out_unlock:
	write_unlock(&mgr->vm_lock);
	return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);
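
/*
 * Illustrative sketch (obj->size, a byte-based size, is hypothetical):
 * @pages is page-based, so byte-sized objects must be converted before they
 * are added:
 *
 *	ret = drm_vma_offset_add(mgr, &obj->vma_node,
 *				 DIV_ROUND_UP(obj->size, PAGE_SIZE));
 */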

/**
 * drm_vma_offset_remove() - Remove offset node from manager
 * @mgr: Manager object
 * @node: Node to be removed
 *
 * Remove a node from the offset manager. If the node wasn't added before, this
 * does nothing. After this call returns, the offset and size will be 0 until a
 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
 * offset is allocated.
 */
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
			   struct drm_vma_offset_node *node)
{
	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
		drm_mm_remove_node(&node->vm_node);
		memset(&node->vm_node, 0, sizeof(node->vm_node));
	}

	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_remove);
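
/*
 * Illustrative sketch (my_obj_free is hypothetical): since removal is a
 * no-op for nodes that were never added, an object-destruction path can
 * call it unconditionally:
 *
 *	static void my_obj_free(struct drm_vma_offset_manager *mgr,
 *				struct my_obj *obj)
 *	{
 *		drm_vma_offset_remove(mgr, &obj->vma_node);
 *		kfree(obj);
 *	}
 */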

/**
 * drm_vma_node_allow - Add open-file to list of allowed users
 * @node: Node to modify
 * @filp: Open file to add
 *
 * Add @filp to the list of allowed open-files for this node. If @filp is
 * already on this list, the ref-count is incremented.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call drm_vma_node_allow() if the
 * node is currently not added to any offset-manager.
 *
 * You must remove all open-files the same number of times as you added them
 * before destroying the node. Otherwise, you will leak memory.
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * 0 on success, negative error code on internal failure (out-of-mem)
 */
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
{
	struct rb_node **iter;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_file *new, *entry;
	int ret = 0;

	/* Preallocate entry to avoid atomic allocations below. It is quite
	 * unlikely that an open-file is added twice to a single node so we
	 * don't optimize for this case. OOM is checked below only if the entry
	 * is actually used. */
	new = kmalloc(sizeof(*entry), GFP_KERNEL);

	write_lock(&node->vm_lock);

	iter = &node->vm_files.rb_node;

	while (likely(*iter)) {
		parent = *iter;
		entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);

		if (filp == entry->vm_filp) {
			entry->vm_count++;
			goto unlock;
		} else if (filp > entry->vm_filp) {
			iter = &(*iter)->rb_right;
		} else {
			iter = &(*iter)->rb_left;
		}
	}

	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}

	new->vm_filp = filp;
	new->vm_count = 1;
	rb_link_node(&new->vm_rb, parent, iter);
	rb_insert_color(&new->vm_rb, &node->vm_files);
	new = NULL;

unlock:
	write_unlock(&node->vm_lock);
	kfree(new);
	return ret;
}
EXPORT_SYMBOL(drm_vma_node_allow);
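
/*
 * Illustrative sketch (hypothetical handle-creation path, modeled on how GEM
 * drivers of this era use the API): grant the creating open-file access to
 * the object before its offset is published:
 *
 *	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
 *	if (ret)
 *		return ret;
 */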

/**
 * drm_vma_node_revoke - Remove open-file from list of allowed users
 * @node: Node to modify
 * @filp: Open file to remove
 *
 * Decrement the ref-count of @filp in the list of allowed open-files on @node.
 * If the ref-count drops to zero, remove @filp from the list. You must call
 * this once for every drm_vma_node_allow() on @filp.
 *
 * This is locked against concurrent access internally.
 *
 * If @filp is not on the list, nothing is done.
 */
void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	write_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (filp == entry->vm_filp) {
			if (!--entry->vm_count) {
				rb_erase(&entry->vm_rb, &node->vm_files);
				kfree(entry);
			}
			break;
		} else if (filp > entry->vm_filp) {
			iter = iter->rb_right;
		} else {
			iter = iter->rb_left;
		}
	}

	write_unlock(&node->vm_lock);
}
EXPORT_SYMBOL(drm_vma_node_revoke);
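
/*
 * Illustrative sketch: every drm_vma_node_allow() must be balanced by a
 * revoke, typically when the corresponding handle is destroyed:
 *
 *	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
 */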

/**
 * drm_vma_node_is_allowed - Check whether an open-file is granted access
 * @node: Node to check
 * @filp: Open-file to check for
 *
 * Search @node's list of allowed open-files for @filp (see
 * drm_vma_node_allow()).
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * true iff @filp is on the list
 */
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
			     struct file *filp)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	read_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (filp == entry->vm_filp)
			break;
		else if (filp > entry->vm_filp)
			iter = iter->rb_right;
		else
			iter = iter->rb_left;
	}

	read_unlock(&node->vm_lock);

	return iter;
}
EXPORT_SYMBOL(drm_vma_node_is_allowed);
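
/*
 * Illustrative sketch (hypothetical mmap path): reject mappings from
 * open-files that were never granted access via drm_vma_node_allow():
 *
 *	if (!drm_vma_node_is_allowed(node, filp))
 *		return -EACCES;
 */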