/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright (c) 2012 David Airlie <airlied@linux.ie>
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/**
 * DOC: vma offset manager
 *
 * The vma-manager is responsible for mapping arbitrary driver-dependent
 * memory regions into the linear user address-space. It provides offsets to
 * the caller which can then be used on the address_space of the drm-device.
 * It takes care that regions do not overlap, sizes them appropriately and
 * does not confuse mm-core with inconsistent fake vm_pgoff fields.
 * Drivers shouldn't use this for object placement in VMEM. This manager should
 * only be used to manage mappings into linear user-space VMs.
 *
 * We use drm_mm as backend to manage object allocations. But it is highly
 * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
 * speed up offset lookups.
 *
 * You must not use multiple offset managers on a single address_space.
 * Otherwise, mm-core will be unable to tear down memory mappings as the VM
 * will no longer be linear.
 *
 * This offset manager works on page-based addresses. That is, every argument
 * and return code (with the exception of drm_vma_node_offset_addr()) is given
 * in number of pages, not number of bytes. This means object sizes and
 * offsets must always be page-aligned (as usual).
 * If you want to get a valid byte-based user-space address for a given offset,
 * please see drm_vma_node_offset_addr().
 *
 * In addition to offset management, the vma offset manager also handles
 * access management. For every open-file context that is allowed to access a
 * given node, you must call drm_vma_node_allow(). Otherwise, an mmap() call
 * on this open-file with the offset of the node will fail with -EACCES. To
 * revoke access again, use drm_vma_node_revoke(). However, the caller is
 * responsible for destroying already existing mappings, if required.
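 *
 * A rough sketch of the overall flow in a driver, with the manager embedded
 * in the device structure and one node per mappable object (dev_priv, obj,
 * first_page and page_count are purely illustrative names, not part of this
 * API)::
 *
 *     drm_vma_offset_manager_init(&dev_priv->vma_manager, first_page,
 *                                 page_count);
 *
 *     ret = drm_vma_offset_add(&dev_priv->vma_manager, &obj->vma_node,
 *                              obj->size >> PAGE_SHIFT);
 *     offset = drm_vma_node_offset_addr(&obj->vma_node);
 *
 *     drm_vma_offset_remove(&dev_priv->vma_manager, &obj->vma_node);
 *     drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
 *
 * The byte offset returned by drm_vma_node_offset_addr() is what user-space
 * would pass to mmap() on the DRM device file descriptor.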
 */

/**
 * drm_vma_offset_manager_init - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction is left
 * for the caller. While calling into the vma-manager, a given node must
 * always be guaranteed to be referenced.
 */
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size)
{
	rwlock_init(&mgr->vm_lock);
	drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);

/**
 * drm_vma_offset_manager_destroy() - Destroy offset manager
 * @mgr: Manager object
 *
 * Destroy an object manager which was previously created via
 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
 * before destroying the manager. Otherwise, drm_mm will refuse to free the
 * requested resources.
 *
 * The manager must not be accessed after this function is called.
 */
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
	/* take the lock to protect against buggy drivers */
	write_lock(&mgr->vm_lock);
	drm_mm_takedown(&mgr->vm_addr_space_mm);
	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);

/**
 * drm_vma_offset_lookup_locked() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Find a node given a start address and object size. This returns the _best_
 * match for the given node. That is, @start may point somewhere into a valid
 * region and the given node will be returned, as long as the node spans the
 * whole requested area (given the size in number of pages as @pages).
 *
 * Note that before lookup the vma offset manager lookup lock must be acquired
 * with drm_vma_offset_lock_lookup(). See there for an example. This can then
 * be used to implement weakly referenced lookups using kref_get_unless_zero().
 *
 * Example:
 *
 * ::
 *
 *     drm_vma_offset_lock_lookup(mgr);
 *     node = drm_vma_offset_lookup_locked(mgr);
 *     if (node)
 *         kref_get_unless_zero(container_of(node, sth, entr));
 *     drm_vma_offset_unlock_lookup(mgr);
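 *
 * In an mmap() handler, the lookup is typically followed by an access check
 * before the mapping is actually set up. A rough sketch, assuming a
 * hypothetical &struct my_obj that embeds the node as @vma_node and is
 * kref-counted via @ref; mgr, vma and file_priv stand for the offset manager,
 * the VMA being mapped and the caller's &struct drm_file (all purely
 * illustrative, not part of this API)::
 *
 *     struct drm_vma_offset_node *node;
 *     struct my_obj *obj = NULL;
 *
 *     drm_vma_offset_lock_lookup(mgr);
 *     node = drm_vma_offset_lookup_locked(mgr, vma->vm_pgoff,
 *                                         vma_pages(vma));
 *     if (node && kref_get_unless_zero(&container_of(node, struct my_obj,
 *                                                    vma_node)->ref))
 *         obj = container_of(node, struct my_obj, vma_node);
 *     drm_vma_offset_unlock_lookup(mgr);
 *
 *     if (!obj)
 *         return -EINVAL;
 *     if (!drm_vma_node_is_allowed(node, file_priv))
 *         return -EACCES;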
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned. It's the caller's responsibility to make sure the node doesn't
 * get destroyed before the caller can access it.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
							 unsigned long start,
							 unsigned long pages)
{
	struct drm_mm_node *node, *best;
	struct rb_node *iter;
	unsigned long offset;

	iter = mgr->vm_addr_space_mm.interval_tree.rb_node;
	best = NULL;

	while (likely(iter)) {
		node = rb_entry(iter, struct drm_mm_node, rb);
		offset = node->start;
		if (start >= offset) {
			iter = iter->rb_right;
			best = node;
			if (start == offset)
				break;
		} else {
			iter = iter->rb_left;
		}
	}

	/* verify that the node spans the requested area */
	if (best) {
		offset = best->start + best->size;
		if (offset < start + pages)
			best = NULL;
	}

	if (!best)
		return NULL;

	return container_of(best, struct drm_vma_offset_node, vm_node);
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);

/**
 * drm_vma_offset_add() - Add offset node to manager
 * @mgr: Manager object
 * @node: Node to be added
 * @pages: Allocation size visible to user-space (in number of pages)
 *
 * Add a node to the offset-manager. If the node was already added, this does
 * nothing and returns 0. @pages is the size of the object given in number of
 * pages.
 * After this call succeeds, you can access the offset of the node until it
 * is removed again.
 *
 * If this call fails, it is safe to retry the operation or to call
 * drm_vma_offset_remove() anyway; no cleanup is required in that case.
 *
 * @pages is not required to be the same size as the underlying memory object
 * that you want to map. It only limits the size that user-space can map into
 * their address space.
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
		       struct drm_vma_offset_node *node, unsigned long pages)
{
	int ret;

	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		ret = 0;
		goto out_unlock;
	}

	ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
				 pages, 0, DRM_MM_SEARCH_DEFAULT);

out_unlock:
	write_unlock(&mgr->vm_lock);
	return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);
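
/*
 * A node does not have to cover the whole backing object. As a rough sketch
 * (the obj layout is illustrative only), a driver that wants user-space to
 * map no more than the first half of a buffer could add the node with half
 * the page count:
 *
 *	pages = (obj->size >> PAGE_SHIFT) / 2;
 *	ret = drm_vma_offset_add(mgr, &obj->vma_node, pages);
 *
 * mmap() requests reaching beyond that window can then be rejected by
 * comparing vma_pages(vma) against drm_vma_node_size(&obj->vma_node).
 */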

/**
 * drm_vma_offset_remove() - Remove offset node from manager
 * @mgr: Manager object
 * @node: Node to be removed
 *
 * Remove a node from the offset manager. If the node wasn't added before, this
 * does nothing. After this call returns, the offset and size will be 0 until a
 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
 * offset is allocated.
 */
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
			   struct drm_vma_offset_node *node)
{
	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		drm_mm_remove_node(&node->vm_node);
		memset(&node->vm_node, 0, sizeof(node->vm_node));
	}

	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_remove);

/**
 * drm_vma_node_allow - Add open-file to list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to add
 *
 * Add @tag to the list of allowed open-files for this node. If @tag is
 * already on this list, the ref-count is incremented.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call drm_vma_node_allow() if the
 * node is currently not added to any offset-manager.
 *
 * You must remove all open-files the same number of times as you added them
 * before destroying the node. Otherwise, you will leak memory.
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * 0 on success, negative error code on internal failure (out-of-mem)
 */
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
{
	struct rb_node **iter;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_file *new, *entry;
	int ret = 0;

	/* Preallocate entry to avoid atomic allocations below. It is quite
	 * unlikely that an open-file is added twice to a single node so we
	 * don't optimize for this case. OOM is checked below only if the entry
	 * is actually used. */
	new = kmalloc(sizeof(*entry), GFP_KERNEL);

	write_lock(&node->vm_lock);

	iter = &node->vm_files.rb_node;

	while (likely(*iter)) {
		parent = *iter;
		entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);

		if (tag == entry->vm_tag) {
			entry->vm_count++;
			goto unlock;
		} else if (tag > entry->vm_tag) {
			iter = &(*iter)->rb_right;
		} else {
			iter = &(*iter)->rb_left;
		}
	}

	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}

	new->vm_tag = tag;
	new->vm_count = 1;
	rb_link_node(&new->vm_rb, parent, iter);
	rb_insert_color(&new->vm_rb, &node->vm_files);
	new = NULL;

unlock:
	write_unlock(&node->vm_lock);
	kfree(new);
	return ret;
}
EXPORT_SYMBOL(drm_vma_node_allow);

/**
 * drm_vma_node_revoke - Remove open-file from list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to remove
 *
 * Decrement the ref-count of @tag in the list of allowed open-files on @node.
 * If the ref-count drops to zero, remove @tag from the list. You must call
 * this once for every drm_vma_node_allow() on @tag.
 *
 * This is locked against concurrent access internally.
 *
 * If @tag is not on the list, nothing is done.
 */
void drm_vma_node_revoke(struct drm_vma_offset_node *node,
			 struct drm_file *tag)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	write_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (tag == entry->vm_tag) {
			if (!--entry->vm_count) {
				rb_erase(&entry->vm_rb, &node->vm_files);
				kfree(entry);
			}
			break;
		} else if (tag > entry->vm_tag) {
			iter = iter->rb_right;
		} else {
			iter = iter->rb_left;
		}
	}

	write_unlock(&node->vm_lock);
}
EXPORT_SYMBOL(drm_vma_node_revoke);
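
/*
 * drm_vma_node_allow() and drm_vma_node_revoke() are meant to be called in
 * pairs, typically from the paths that create and destroy a per-file handle
 * for the object. A rough sketch (my_obj and its helpers are illustrative,
 * not part of this file):
 *
 *	int my_obj_handle_create(struct my_obj *obj, struct drm_file *file)
 *	{
 *		return drm_vma_node_allow(&obj->vma_node, file);
 *	}
 *
 *	void my_obj_handle_delete(struct my_obj *obj, struct drm_file *file)
 *	{
 *		drm_vma_node_revoke(&obj->vma_node, file);
 *	}
 */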

/**
 * drm_vma_node_is_allowed - Check whether an open-file is granted access
 * @node: Node to check
 * @tag: Tag of file to check
 *
 * Search the list in @node to see whether @tag is currently on the list of
 * allowed open-files (see drm_vma_node_allow()).
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * true iff @tag is on the list
 */
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
			     struct drm_file *tag)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	read_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (tag == entry->vm_tag)
			break;
		else if (tag > entry->vm_tag)
			iter = iter->rb_right;
		else
			iter = iter->rb_left;
	}

	read_unlock(&node->vm_lock);

	return iter;
}
EXPORT_SYMBOL(drm_vma_node_is_allowed);