// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright (c) 2012 David Airlie <airlied@linux.ie>
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: vma offset manager
 *
 * The vma-manager is responsible for mapping arbitrary driver-dependent
 * memory regions into the linear user address-space. It provides offsets to
 * the caller which can then be used on the address_space of the drm-device.
 * It takes care not to overlap regions, to size them appropriately and to
 * not confuse mm-core with inconsistent fake vm_pgoff fields.
 * Drivers shouldn't use this for object placement in VMEM. This manager should
 * only be used to manage mappings into linear user-space VMs.
 *
 * We use drm_mm as backend to manage object allocations. But it is highly
 * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
 * speed up offset lookups.
 *
 * You must not use multiple offset managers on a single address_space.
 * Otherwise, mm-core will be unable to tear down memory mappings as the VM
 * will no longer be linear.
 *
 * This offset manager works on page-based addresses. That is, every argument
 * and return code (with the exception of drm_vma_node_offset_addr()) is given
 * in number of pages, not number of bytes. That means, object sizes and offsets
 * must always be page-aligned (as usual).
 * If you want to get a valid byte-based user-space address for a given offset,
 * please see drm_vma_node_offset_addr().
 *
 * In addition to offset management, the vma offset manager also handles access
 * management. For every open-file context that is allowed to access a given
 * node, you must call drm_vma_node_allow(). Otherwise, an mmap() call on this
 * open-file with the offset of the node will fail with -EACCES. To revoke
 * access again, use drm_vma_node_revoke(). However, the caller is responsible
 * for destroying already existing mappings, if required.
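 *
 * A minimal usage sketch of the offset-manager life-cycle (@mgr, @obj and
 * @obj->npages are hypothetical driver-side names, not part of this API)::
 *
 *     drm_vma_offset_manager_init(&mgr, page_offset, size);
 *
 *     ret = drm_vma_offset_add(&mgr, &obj->vma_node, obj->npages);
 *     if (!ret)
 *         user_offset = drm_vma_node_offset_addr(&obj->vma_node);
 *
 *     drm_vma_offset_remove(&mgr, &obj->vma_node);
 *     drm_vma_offset_manager_destroy(&mgr);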
 */

/**
 * drm_vma_offset_manager_init - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction are left
 * to the caller. While calling into the vma-manager, a given node must always
 * be guaranteed to be referenced.
 */
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size)
{
	rwlock_init(&mgr->vm_lock);
	drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);

/**
 * drm_vma_offset_manager_destroy() - Destroy offset manager
 * @mgr: Manager object
 *
 * Destroy an offset manager which was previously created via
 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
 * before destroying the manager. Otherwise, drm_mm will refuse to free the
 * requested resources.
 *
 * The manager must not be accessed after this function is called.
 */
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
	drm_mm_takedown(&mgr->vm_addr_space_mm);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);

/**
 * drm_vma_offset_lookup_locked() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Find a node given a start address and object size. This returns the _best_
 * match for the given node. That is, @start may point somewhere into a valid
 * region and the given node will be returned, as long as the node spans the
 * whole requested area (given the size in number of pages as @pages).
 *
 * Note that before lookup the vma offset manager lookup lock must be acquired
 * with drm_vma_offset_lock_lookup(). See there for an example. This can then be
 * used to implement weakly referenced lookups using kref_get_unless_zero().
 *
 * Example (@sth is a hypothetical object type that embeds the node as its
 * @entry member and is ref-counted via @kref):
 *
 * ::
 *
 *     drm_vma_offset_lock_lookup(mgr);
 *     node = drm_vma_offset_lookup_locked(mgr, start, pages);
 *     if (node)
 *         kref_get_unless_zero(&container_of(node, struct sth, entry)->kref);
 *     drm_vma_offset_unlock_lookup(mgr);
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned. It's the caller's responsibility to make sure the node doesn't
 * get destroyed before the caller can access it.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
							 unsigned long start,
							 unsigned long pages)
{
	struct drm_mm_node *node, *best;
	struct rb_node *iter;
	unsigned long offset;

	iter = mgr->vm_addr_space_mm.interval_tree.rb_root.rb_node;
	best = NULL;

	/* find the right-most node whose start address is <= @start */
	while (likely(iter)) {
		node = rb_entry(iter, struct drm_mm_node, rb);
		offset = node->start;
		if (start >= offset) {
			iter = iter->rb_right;
			best = node;
			if (start == offset)
				break;
		} else {
			iter = iter->rb_left;
		}
	}

	/* verify that the node spans the requested area */
	if (best) {
		offset = best->start + best->size;
		if (offset < start + pages)
			best = NULL;
	}

	if (!best)
		return NULL;

	return container_of(best, struct drm_vma_offset_node, vm_node);
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);

/**
 * drm_vma_offset_add() - Add offset node to manager
 * @mgr: Manager object
 * @node: Node to be added
 * @pages: Allocation size visible to user-space (in number of pages)
 *
 * Add a node to the offset-manager. If the node was already added, this does
 * nothing and returns 0. @pages is the size of the object given in number of
 * pages.
 * After this call succeeds, you can access the offset of the node until it
 * is removed again.
 *
 * If this call fails, it is safe to retry the operation or to call
 * drm_vma_offset_remove() anyway. However, no cleanup is required in that
 * case.
 *
 * @pages is not required to be the same size as the underlying memory object
 * that you want to map. It only limits the size that user-space can map into
 * their address space.
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
		       struct drm_vma_offset_node *node, unsigned long pages)
{
	int ret = 0;

	write_lock(&mgr->vm_lock);

	if (!drm_mm_node_allocated(&node->vm_node))
		ret = drm_mm_insert_node(&mgr->vm_addr_space_mm,
					 &node->vm_node, pages);

	write_unlock(&mgr->vm_lock);

	return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);

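/*
 * A minimal sketch of the typical pairing of drm_vma_offset_add() with
 * drm_vma_node_offset_addr() when reporting a byte-based mmap offset to
 * user-space; @obj and @args are hypothetical driver-side names, not part
 * of this API:
 *
 *     ret = drm_vma_offset_add(mgr, &obj->vma_node, obj->npages);
 *     if (ret)
 *         return ret;
 *     args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
 */
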
/**
 * drm_vma_offset_remove() - Remove offset node from manager
 * @mgr: Manager object
 * @node: Node to be removed
 *
 * Remove a node from the offset manager. If the node wasn't added before, this
 * does nothing. After this call returns, the offset and size will be 0 until a
 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
 * offset is allocated.
 */
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
			   struct drm_vma_offset_node *node)
{
	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		drm_mm_remove_node(&node->vm_node);
		memset(&node->vm_node, 0, sizeof(node->vm_node));
	}

	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_remove);

static int vma_node_allow(struct drm_vma_offset_node *node,
			  struct drm_file *tag, bool ref_counted)
{
	struct rb_node **iter;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_file *new, *entry;
	int ret = 0;

	/* Preallocate entry to avoid atomic allocations below. It is quite
	 * unlikely that an open-file is added twice to a single node so we
	 * don't optimize for this case. OOM is checked below only if the entry
	 * is actually used. */
	new = kmalloc(sizeof(*entry), GFP_KERNEL);

	write_lock(&node->vm_lock);

	iter = &node->vm_files.rb_node;

	while (likely(*iter)) {
		parent = *iter;
		entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);

		if (tag == entry->vm_tag) {
			if (ref_counted)
				entry->vm_count++;
			goto unlock;
		} else if (tag > entry->vm_tag) {
			iter = &(*iter)->rb_right;
		} else {
			iter = &(*iter)->rb_left;
		}
	}

	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}

	new->vm_tag = tag;
	new->vm_count = 1;
	rb_link_node(&new->vm_rb, parent, iter);
	rb_insert_color(&new->vm_rb, &node->vm_files);
	new = NULL;

unlock:
	write_unlock(&node->vm_lock);
	kfree(new);
	return ret;
}

/**
 * drm_vma_node_allow - Add open-file to list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to add
 *
 * Add @tag to the list of allowed open-files for this node. If @tag is
 * already on this list, the ref-count is incremented.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call it if the node is currently
 * not added to any offset-manager.
 *
 * You must remove all open-files the same number of times as you added them
 * before destroying the node. Otherwise, you will leak memory.
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * 0 on success, negative error code on internal failure (out-of-mem)
 */
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
{
	return vma_node_allow(node, tag, true);
}
EXPORT_SYMBOL(drm_vma_node_allow);

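/*
 * A minimal sketch of the usual allow/revoke pairing in a driver's
 * per-open-file object hooks; driver_obj_open()/driver_obj_close() and
 * @obj are hypothetical driver-side names, not part of this API:
 *
 *     int driver_obj_open(struct driver_obj *obj, struct drm_file *file)
 *     {
 *         return drm_vma_node_allow(&obj->vma_node, file);
 *     }
 *
 *     void driver_obj_close(struct driver_obj *obj, struct drm_file *file)
 *     {
 *         drm_vma_node_revoke(&obj->vma_node, file);
 *     }
 */
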
/**
 * drm_vma_node_allow_once - Add open-file to list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to add
 *
 * Add @tag to the list of allowed open-files for this node.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call it if the node is currently
 * not added to any offset-manager.
 *
 * Unlike drm_vma_node_allow(), this is not ref-counted; hence
 * drm_vma_node_revoke() must be called at most once after this.
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * 0 on success, negative error code on internal failure (out-of-mem)
 */
int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag)
{
	return vma_node_allow(node, tag, false);
}
EXPORT_SYMBOL(drm_vma_node_allow_once);

/**
 * drm_vma_node_revoke - Remove open-file from list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to remove
 *
 * Decrement the ref-count of @tag in the list of allowed open-files on @node.
 * If the ref-count drops to zero, remove @tag from the list. You must call
 * this once for every drm_vma_node_allow() on @tag.
 *
 * This is locked against concurrent access internally.
 *
 * If @tag is not on the list, nothing is done.
 */
void drm_vma_node_revoke(struct drm_vma_offset_node *node,
			 struct drm_file *tag)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	write_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (tag == entry->vm_tag) {
			if (!--entry->vm_count) {
				rb_erase(&entry->vm_rb, &node->vm_files);
				kfree(entry);
			}
			break;
		} else if (tag > entry->vm_tag) {
			iter = iter->rb_right;
		} else {
			iter = iter->rb_left;
		}
	}

	write_unlock(&node->vm_lock);
}
EXPORT_SYMBOL(drm_vma_node_revoke);

/**
 * drm_vma_node_is_allowed - Check whether an open-file is granted access
 * @node: Node to check
 * @tag: Tag of file to check
 *
 * Search the list in @node whether @tag is currently on the list of allowed
 * open-files (see drm_vma_node_allow()).
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * true if @tag is on the list
 */
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
			     struct drm_file *tag)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	read_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (tag == entry->vm_tag)
			break;
		else if (tag > entry->vm_tag)
			iter = iter->rb_right;
		else
			iter = iter->rb_left;
	}

	read_unlock(&node->vm_lock);

	return iter;
}
EXPORT_SYMBOL(drm_vma_node_is_allowed);
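
/*
 * A minimal sketch of an access check in a driver's mmap path; driver_mmap()
 * and the @obj lookup are hypothetical driver-side code, not part of this
 * API:
 *
 *     if (!drm_vma_node_is_allowed(&obj->vma_node, priv))
 *         return -EACCES;
 *
 *     return driver_map_pages(obj, vma);
 */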