Lines Matching full:va

36 * The DRM GPU VA Manager, represented by struct drm_gpuva_manager, keeps track
37 * of a GPU's virtual address (VA) space and manages the corresponding virtual
42 * all existing GPU VA mappings using this &drm_gem_object as backing buffer.
47 * The GPU VA manager internally uses a rb-tree to manage the
51 * portion of VA space reserved by the kernel. This node is initialized together
52 * with the GPU VA manager instance and removed when the GPU VA manager is
69 * Besides its capability to manage and represent a GPU VA space, the
73 * Therefore the DRM GPU VA manager provides an algorithm implementing splitting
74 * and merging of existing GPU VA mappings with the ones that are requested to
83 * of the GPU VA space.
85 * Depending on how the new GPU VA mapping intersects with the existing mappings
86 * of the GPU VA space the &drm_gpuva_fn_ops callbacks contain an arbitrary
110 * to call back into the driver in order to unmap a range of GPU VA space. The
127 * To update the &drm_gpuva_manager's view of the GPU VA space
134 * The following diagram depicts the basic relationships of existing GPU VA
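
The overview fragments above (source lines 36-134) describe how the manager reports split/merge decisions back to the driver through &drm_gpuva_fn_ops callbacks. A minimal sketch of wiring those callbacks up follows, assuming the callback table exposes sm_step_map/sm_step_remap/sm_step_unmap hooks with a (struct drm_gpuva_op *, void *) signature as in the accompanying header; the later sketches in this listing reuse these names.

    #include <drm/drm_gpuva_mgr.h>
    #include <linux/slab.h>

    /* Sketch only: steps reported while walking a requested mapping range. */
    static int driver_sm_step_map(struct drm_gpuva_op *op, void *priv);
    static int driver_sm_step_remap(struct drm_gpuva_op *op, void *priv);
    static int driver_sm_step_unmap(struct drm_gpuva_op *op, void *priv);

    static const struct drm_gpuva_fn_ops driver_gpuva_ops = {
        .sm_step_map   = driver_sm_step_map,
        .sm_step_remap = driver_sm_step_remap,
        .sm_step_unmap = driver_sm_step_unmap,
    };
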
389 * Generally, the GPU VA manager does not take care of locking itself, it is
395 * The GPU VA manager also does not take care of the locking of the backing
396 * &drm_gem_object buffers' GPU VA lists by itself; drivers are responsible to
401 * However, the GPU VA manager contains lockdep checks to ensure callers of its
402 * API hold the corresponding lock whenever the &drm_gem_object's GPU VA list is
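
The locking fragments above (source lines 389-402) state that the manager serializes neither its own tree nor the GEM objects' GPU VA lists. A minimal sketch of the first half of that contract, assuming a hypothetical driver-side mutex guards every access to the manager's tree:

    struct driver_vm {
        struct drm_gpuva_manager mgr;
        struct mutex lock;    /* hypothetical driver lock for the VA space tree */
    };

    static int driver_vm_insert_locked(struct driver_vm *vm, struct drm_gpuva *va)
    {
        int ret;

        mutex_lock(&vm->lock);
        ret = drm_gpuva_insert(&vm->mgr, va);
        mutex_unlock(&vm->lock);

        return ret;
    }

The second half, the per-GEM list lock checked by the lockdep annotations mentioned at source line 401, is illustrated further down next to drm_gpuva_link()/drm_gpuva_unlink().
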
441 * struct drm_gpuva *va;
445 * va = driver_gpuva_alloc();
446 * if (!va)
447 * ; // unwind previous VA space updates,
451 * drm_gpuva_map(mgr, va, &op->map);
452 * drm_gpuva_link(va);
458 * va = op->remap.unmap->va;
463 * ; // unwind previous VA space
471 * ; // unwind previous VA space
479 * drm_gpuva_unlink(va);
488 * va = op->unmap->va;
491 * drm_gpuva_unlink(va);
574 * drm_gpuva_unlink(op->remap.unmap->va);
575 * kfree(op->remap.unmap->va);
592 * drm_gpuva_unlink(op->unmap.va);
594 * kfree(op->unmap.va);
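
Source lines 441-594 above are the surviving "va" lines of the documentation's own driver example. A hedged reconstruction of the map and unmap steps, consistent with those fragments, might look like the sketch below; the driver_ctx structure, the kzalloc() allocation and the simplified error handling are assumptions, and the remap step is sketched separately further down.

    struct driver_ctx {    /* hypothetical state passed to the callbacks as priv */
        struct drm_gpuva_manager *mgr;
        struct drm_gpuva *prev_va;    /* preallocated for the remap step */
        struct drm_gpuva *next_va;
    };

    static int driver_sm_step_map(struct drm_gpuva_op *op, void *priv)
    {
        struct driver_ctx *ctx = priv;
        struct drm_gpuva *va;

        va = kzalloc(sizeof(*va), GFP_KERNEL);
        if (!va)
            return -ENOMEM;    /* real code would unwind previous VA space updates */

        drm_gpuva_map(ctx->mgr, va, &op->map);
        drm_gpuva_link(va);

        return 0;
    }

    static int driver_sm_step_unmap(struct drm_gpuva_op *op, void *priv)
    {
        struct drm_gpuva *va = op->unmap.va;

        drm_gpuva_unlink(va);
        drm_gpuva_unmap(&op->unmap);
        kfree(va);

        return 0;
    }
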
602 #define GPUVA_START(node) ((node)->va.addr)
603 #define GPUVA_LAST(node) ((node)->va.addr + (node)->va.range - 1)
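
For orientation, these two macros use an inclusive end: a node with va.addr = 0x100000 and va.range = 0x10000 yields GPUVA_START = 0x100000 and GPUVA_LAST = 0x10ffff, which are the bounds the interval-tree lookups below operate on.
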
613 struct drm_gpuva *va);
614 static void __drm_gpuva_remove(struct drm_gpuva *va);
639 u64 kstart = mgr->kernel_alloc_node.va.addr; in drm_gpuva_in_kernel_node()
640 u64 krange = mgr->kernel_alloc_node.va.range; in drm_gpuva_in_kernel_node()
658 * @name: the name of the GPU VA space
659 * @start_offset: the start offset of the GPU VA space
660 * @range: the size of the GPU VA space
661 * @reserve_offset: the start of the kernel reserved GPU VA area
662 * @reserve_range: the size of the kernel reserved GPU VA area
690 mgr->kernel_alloc_node.va.addr = reserve_offset; in drm_gpuva_manager_init()
691 mgr->kernel_alloc_node.va.range = reserve_range; in drm_gpuva_manager_init()
705 * holds GPU VA mappings.
712 if (mgr->kernel_alloc_node.va.range) in drm_gpuva_manager_destroy()
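
Source lines 658-712 cover drm_gpuva_manager_init() and drm_gpuva_manager_destroy(). A minimal usage sketch, assuming the parameter order implied by the kerneldoc fragments (name, start offset, range, then the kernel reserved offset and range) followed by the &drm_gpuva_fn_ops table from the earlier sketch; all address values are placeholders:

    static void driver_vm_setup(struct driver_vm *vm)
    {
        drm_gpuva_manager_init(&vm->mgr, "example-vm",
                               0, 1ULL << 48,    /* managed GPU VA space */
                               0, 0x1000,        /* kernel reserved area */
                               &driver_gpuva_ops);
    }

    static void driver_vm_teardown(struct driver_vm *vm)
    {
        /* Source line 712 shows the reserved node only being cleaned up when
         * a non-zero reserve_range was passed at init time.
         */
        drm_gpuva_manager_destroy(&vm->mgr);
    }
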
722 struct drm_gpuva *va) in __drm_gpuva_insert() argument
728 GPUVA_START(va), in __drm_gpuva_insert()
729 GPUVA_LAST(va))) in __drm_gpuva_insert()
732 va->mgr = mgr; in __drm_gpuva_insert()
734 drm_gpuva_it_insert(va, &mgr->rb.tree); in __drm_gpuva_insert()
736 node = rb_prev(&va->rb.node); in __drm_gpuva_insert()
742 list_add(&va->rb.entry, head); in __drm_gpuva_insert()
750 * @va: the &drm_gpuva to insert
756 * VA space, such as drm_gpuva_for_each_va_safe() and
763 struct drm_gpuva *va) in drm_gpuva_insert() argument
765 u64 addr = va->va.addr; in drm_gpuva_insert()
766 u64 range = va->va.range; in drm_gpuva_insert()
771 return __drm_gpuva_insert(mgr, va); in drm_gpuva_insert()
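
drm_gpuva_insert() (source lines 750-771) takes an already-initialized &drm_gpuva; the helper visible at source lines 639-640 and the interval check at 728-729 guard against the kernel reserved node and against overlapping an existing entry. A sketch of preparing and inserting a node, with locking as in the earlier mutex sketch omitted and the exact error codes treated as assumptions:

    static int driver_va_insert(struct drm_gpuva_manager *mgr,
                                u64 addr, u64 range,
                                struct drm_gem_object *obj, u64 offset)
    {
        struct drm_gpuva *va;
        int ret;

        va = kzalloc(sizeof(*va), GFP_KERNEL);
        if (!va)
            return -ENOMEM;

        va->va.addr = addr;
        va->va.range = range;
        va->gem.obj = obj;
        va->gem.offset = offset;

        ret = drm_gpuva_insert(mgr, va);
        if (ret)    /* e.g. range invalid or interval already occupied */
            kfree(va);

        return ret;
    }

drm_gpuva_remove() (source lines 784-802) is the inverse and, per source line 797, refuses to remove the kernel_alloc_node.
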
776 __drm_gpuva_remove(struct drm_gpuva *va) in __drm_gpuva_remove() argument
778 drm_gpuva_it_remove(va, &va->mgr->rb.tree); in __drm_gpuva_remove()
779 list_del_init(&va->rb.entry); in __drm_gpuva_remove()
784 * @va: the &drm_gpuva to remove
786 * This removes the given &va from the underlying tree.
789 * VA space, such as drm_gpuva_for_each_va_safe() and
793 drm_gpuva_remove(struct drm_gpuva *va) in drm_gpuva_remove() argument
795 struct drm_gpuva_manager *mgr = va->mgr; in drm_gpuva_remove()
797 if (unlikely(va == &mgr->kernel_alloc_node)) { in drm_gpuva_remove()
802 __drm_gpuva_remove(va); in drm_gpuva_remove()
808 * @va: the &drm_gpuva to link
810 * This adds the given &va to the GPU VA list of the &drm_gem_object it is
817 drm_gpuva_link(struct drm_gpuva *va) in drm_gpuva_link() argument
819 struct drm_gem_object *obj = va->gem.obj; in drm_gpuva_link()
826 list_add_tail(&va->gem.entry, &obj->gpuva.list); in drm_gpuva_link()
832 * @va: the &drm_gpuva to unlink
834 * This removes the given &va from the GPU VA list of the &drm_gem_object it is
841 drm_gpuva_unlink(struct drm_gpuva *va) in drm_gpuva_unlink() argument
843 struct drm_gem_object *obj = va->gem.obj; in drm_gpuva_unlink()
850 list_del_init(&va->gem.entry); in drm_gpuva_unlink()
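
drm_gpuva_link()/drm_gpuva_unlink() (source lines 808-850) add and remove the node on the backing &drm_gem_object's GPU VA list; per the locking fragments earlier, the caller must hold whatever lock the driver designated for that list. The sketch below assumes the GEM object's dma_resv lock plays that role, which is a driver choice rather than something this file mandates, and elides error handling for the lock call:

    struct drm_gem_object *obj = va->gem.obj;

    dma_resv_lock(obj->resv, NULL);
    drm_gpuva_link(va);
    dma_resv_unlock(obj->resv);

    /* ... and on teardown ... */
    dma_resv_lock(obj->resv, NULL);
    drm_gpuva_unlink(va);
    dma_resv_unlock(obj->resv);
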
884 struct drm_gpuva *va; in drm_gpuva_find() local
886 va = drm_gpuva_find_first(mgr, addr, range); in drm_gpuva_find()
887 if (!va) in drm_gpuva_find()
890 if (va->va.addr != addr || in drm_gpuva_find()
891 va->va.range != range) in drm_gpuva_find()
894 return va; in drm_gpuva_find()
904 * @start: the given GPU VA's start address
906 * Find the adjacent &drm_gpuva before the GPU VA with the given &start address.
908 * Note that if there is any free space between the GPU VA mappings no mapping
926 * @end: the given GPU VA's end address
928 * Find the adjacent &drm_gpuva after the GPU VA with the given &end address.
930 * Note that if there is any free space between the GPU VA mappings no mapping
946 * drm_gpuva_interval_empty() - indicate whether a given interval of the VA space
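
Source lines 884-946 cover the lookup helpers. drm_gpuva_find() only succeeds on an exact addr/range match (source lines 890-891), drm_gpuva_find_first() returns the first mapping overlapping the interval, and drm_gpuva_interval_empty() reports whether the interval is unused. A brief sketch:

    struct drm_gpuva *exact, *first;
    bool hole;

    /* exact-match lookup: addr and range must both match */
    exact = drm_gpuva_find(mgr, addr, range);

    /* first mapping overlapping [addr, addr + range), if any */
    first = drm_gpuva_find_first(mgr, addr, range);

    /* true if nothing is mapped anywhere in [addr, addr + range) */
    hole = drm_gpuva_interval_empty(mgr, addr, range);
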
965 * @va: the &drm_gpuva to insert
966 * @op: the &drm_gpuva_op_map to initialize @va with
968 * Initializes the @va from the @op and inserts it into the given @mgr.
972 struct drm_gpuva *va, in drm_gpuva_map() argument
975 drm_gpuva_init_from_op(va, op); in drm_gpuva_map()
976 drm_gpuva_insert(mgr, va); in drm_gpuva_map()
995 struct drm_gpuva *curr = op->unmap->va; in drm_gpuva_remap()
1022 drm_gpuva_remove(op->va); in drm_gpuva_unmap()
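
drm_gpuva_map(), drm_gpuva_remap() and drm_gpuva_unmap() (source lines 965-1022) are the helpers intended to be called from the individual split/merge steps. The remap step is the least obvious one; the hedged sketch below is modeled on the fragments at source lines 574-575 and assumes the hypothetical driver_ctx from the earlier sketch, with prev/next nodes preallocated before the walk:

    static int driver_sm_step_remap(struct drm_gpuva_op *op, void *priv)
    {
        struct driver_ctx *ctx = priv;
        struct drm_gpuva *orig_va = op->remap.unmap->va;

        /* split the original mapping into the preallocated prev/next nodes;
         * either side may be absent depending on how the request overlaps
         */
        drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap);

        drm_gpuva_unlink(orig_va);
        kfree(orig_va);

        if (op->remap.prev)
            drm_gpuva_link(ctx->prev_va);
        if (op->remap.next)
            drm_gpuva_link(ctx->next_va);

        return 0;
    }
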
1034 op.map.va.addr = addr; in op_map_cb()
1035 op.map.va.range = range; in op_map_cb()
1062 struct drm_gpuva *va, bool merge) in op_unmap_cb() argument
1067 op.unmap.va = va; in op_unmap_cb()
1079 struct drm_gpuva *va, *next; in __drm_gpuva_sm_map() local
1086 drm_gpuva_for_each_va_range_safe(va, next, mgr, req_addr, req_end) { in __drm_gpuva_sm_map()
1087 struct drm_gem_object *obj = va->gem.obj; in __drm_gpuva_sm_map()
1088 u64 offset = va->gem.offset; in __drm_gpuva_sm_map()
1089 u64 addr = va->va.addr; in __drm_gpuva_sm_map()
1090 u64 range = va->va.range; in __drm_gpuva_sm_map()
1092 bool merge = !!va->gem.obj; in __drm_gpuva_sm_map()
1099 ret = op_unmap_cb(ops, priv, va, merge); in __drm_gpuva_sm_map()
1106 ret = op_unmap_cb(ops, priv, va, merge); in __drm_gpuva_sm_map()
1114 .va.addr = req_end, in __drm_gpuva_sm_map()
1115 .va.range = range - req_range, in __drm_gpuva_sm_map()
1120 .va = va, in __drm_gpuva_sm_map()
1132 .va.addr = addr, in __drm_gpuva_sm_map()
1133 .va.range = ls_range, in __drm_gpuva_sm_map()
1137 struct drm_gpuva_op_unmap u = { .va = va }; in __drm_gpuva_sm_map()
1159 .va.addr = req_end, in __drm_gpuva_sm_map()
1160 .va.range = end - req_end, in __drm_gpuva_sm_map()
1177 ret = op_unmap_cb(ops, priv, va, merge); in __drm_gpuva_sm_map()
1184 ret = op_unmap_cb(ops, priv, va, merge); in __drm_gpuva_sm_map()
1192 .va.addr = req_end, in __drm_gpuva_sm_map()
1193 .va.range = end - req_end, in __drm_gpuva_sm_map()
1198 .va = va, in __drm_gpuva_sm_map()
1220 struct drm_gpuva *va, *next; in __drm_gpuva_sm_unmap() local
1227 drm_gpuva_for_each_va_range_safe(va, next, mgr, req_addr, req_end) { in __drm_gpuva_sm_unmap()
1230 struct drm_gem_object *obj = va->gem.obj; in __drm_gpuva_sm_unmap()
1231 u64 offset = va->gem.offset; in __drm_gpuva_sm_unmap()
1232 u64 addr = va->va.addr; in __drm_gpuva_sm_unmap()
1233 u64 range = va->va.range; in __drm_gpuva_sm_unmap()
1237 prev.va.addr = addr; in __drm_gpuva_sm_unmap()
1238 prev.va.range = req_addr - addr; in __drm_gpuva_sm_unmap()
1246 next.va.addr = req_end; in __drm_gpuva_sm_unmap()
1247 next.va.range = end - req_end; in __drm_gpuva_sm_unmap()
1255 struct drm_gpuva_op_unmap unmap = { .va = va }; in __drm_gpuva_sm_unmap()
1264 ret = op_unmap_cb(ops, priv, va, false); in __drm_gpuva_sm_unmap()
1275 * @mgr: the &drm_gpuva_manager representing the GPU VA space
1282 * This function iterates the given range of the GPU VA space. It utilizes the
1286 * Drivers may use these callbacks to update the GPU VA space right away within
1289 * be called before the &drm_gpuva_manager's view of the GPU VA space was
1291 * &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
1325 * @mgr: the &drm_gpuva_manager representing the GPU VA space
1330 * This function iterates the given range of the GPU VA space. It utilizes the
1334 * Drivers may use these callbacks to update the GPU VA space right away within
1337 * called before the &drm_gpuva_manager's view of the GPU VA space was updated
1339 * of the GPU VA space drm_gpuva_insert(), drm_gpuva_destroy_locked() and/or
1458 * @mgr: the &drm_gpuva_manager representing the GPU VA space
1477 * is necessary to update the &drm_gpuva_manager's view of the GPU VA space. The
1479 * update the &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
1526 * @mgr: the &drm_gpuva_manager representing the GPU VA space
1541 * is necessary to update the &drm_gpuva_manager's view of the GPU VA space. The
1543 * &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
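
The fragments at source lines 1458-1543 describe the second flavor of the interface, where the ops-list helpers (drm_gpuva_sm_map_ops_create() and drm_gpuva_sm_unmap_ops_create()) return a list of operations to be processed later instead of invoking the callbacks immediately. A hedged sketch of consuming such a list; the DRM_GPUVA_OP_* names and drm_gpuva_for_each_op()/drm_gpuva_ops_free() are taken from the accompanying header, and the per-op bodies are abbreviated:

    static int driver_vm_bind(struct drm_gpuva_manager *mgr,
                              u64 req_addr, u64 req_range,
                              struct drm_gem_object *req_obj, u64 req_offset)
    {
        struct drm_gpuva_ops *ops;
        struct drm_gpuva_op *op;
        int ret = 0;

        ops = drm_gpuva_sm_map_ops_create(mgr, req_addr, req_range,
                                          req_obj, req_offset);
        if (IS_ERR(ops))
            return PTR_ERR(ops);

        drm_gpuva_for_each_op(op, ops) {
            switch (op->op) {
            case DRM_GPUVA_OP_MAP:
                /* allocate a &drm_gpuva, then drm_gpuva_map() + drm_gpuva_link() */
                break;
            case DRM_GPUVA_OP_REMAP:
                /* split the existing mapping, see the remap sketch above */
                break;
            case DRM_GPUVA_OP_UNMAP:
                /* drm_gpuva_unlink() + drm_gpuva_unmap(), then free the node */
                break;
            default:
                ret = -EINVAL;
                break;
            }
        }

        drm_gpuva_ops_free(mgr, ops);
        return ret;
    }
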
1587 * @mgr: the &drm_gpuva_manager representing the GPU VA space
1609 struct drm_gpuva *va; in drm_gpuva_prefetch_ops_create() local
1619 drm_gpuva_for_each_va_range(va, mgr, addr, end) { in drm_gpuva_prefetch_ops_create()
1627 op->prefetch.va = va; in drm_gpuva_prefetch_ops_create()
1641 * @mgr: the &drm_gpuva_manager representing the GPU VA space
1664 struct drm_gpuva *va; in drm_gpuva_gem_unmap_ops_create() local
1675 drm_gem_for_each_gpuva(va, obj) { in drm_gpuva_gem_unmap_ops_create()
1683 op->unmap.va = va; in drm_gpuva_gem_unmap_ops_create()
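
Finally, drm_gpuva_prefetch_ops_create() (source lines 1587-1627) walks a VA range and drm_gpuva_gem_unmap_ops_create() (source lines 1641-1683) walks a GEM object's GPU VA list to build such ops lists. A short sketch of unmapping every mapping backed by one buffer, again assuming the dma_resv lock protects the GEM list and using a hypothetical driver_unmap_range() helper:

    static int driver_gem_unmap_all(struct drm_gpuva_manager *mgr,
                                    struct drm_gem_object *obj)
    {
        struct drm_gpuva_ops *ops;
        struct drm_gpuva_op *op;

        dma_resv_lock(obj->resv, NULL);    /* lock error handling elided */
        ops = drm_gpuva_gem_unmap_ops_create(mgr, obj);
        dma_resv_unlock(obj->resv);
        if (IS_ERR(ops))
            return PTR_ERR(ops);

        drm_gpuva_for_each_op(op, ops)
            driver_unmap_range(op->unmap.va->va.addr,    /* hypothetical helper */
                               op->unmap.va->va.range);

        drm_gpuva_ops_free(mgr, ops);
        return 0;
    }
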