// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     Danilo Krummrich <dakr@redhat.com>
 *
 */

#include <drm/drm_gpuva_mgr.h>

#include <linux/interval_tree_generic.h>
#include <linux/mm.h>

/**
 * DOC: Overview
 *
 * The DRM GPU VA Manager, represented by struct drm_gpuva_manager, keeps track
 * of a GPU's virtual address (VA) space and manages the corresponding virtual
 * mappings represented by &drm_gpuva objects. It also keeps track of the
 * mapping's backing &drm_gem_object buffers.
 *
 * &drm_gem_object buffers maintain a list of &drm_gpuva objects representing
 * all existent GPU VA mappings using this &drm_gem_object as backing buffer.
 *
 * GPU VAs can be flagged as sparse, such that drivers may use GPU VAs to also
 * keep track of sparse PTEs in order to support Vulkan 'Sparse Resources'.
 *
 * The GPU VA manager internally uses an rb-tree to manage the
 * &drm_gpuva mappings within a GPU's virtual address space.
 *
 * The &drm_gpuva_manager contains a special &drm_gpuva representing the
 * portion of VA space reserved by the kernel.
 * This node is initialized together with the GPU VA manager instance and
 * removed when the GPU VA manager is destroyed.
 *
 * In a typical application, drivers would embed struct drm_gpuva_manager and
 * struct drm_gpuva within their own driver specific structures; neither the
 * manager itself nor the &drm_gpuva entries require any further memory
 * allocations.
 *
 * The data structures needed to store &drm_gpuvas within the &drm_gpuva_manager
 * are contained within struct drm_gpuva already. Hence, for inserting
 * &drm_gpuva entries from within dma-fence signalling critical sections it is
 * enough to pre-allocate the &drm_gpuva structures.
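 *
 * A minimal sketch (structure and field names hypothetical) of such an
 * embedding::
 *
 *      struct driver_gpu_vm {
 *              struct drm_gpuva_manager mgr;
 *              // driver specific VM state
 *      };
 *
 *      struct driver_gpu_mapping {
 *              struct drm_gpuva va;
 *              // driver specific mapping state
 *      };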
 */

/**
 * DOC: Split and Merge
 *
 * Besides its capability to manage and represent a GPU VA space, the
 * &drm_gpuva_manager also provides functions to let the &drm_gpuva_manager
 * calculate a sequence of operations to satisfy a given map or unmap request.
 *
 * Therefore the DRM GPU VA manager provides an algorithm implementing splitting
 * and merging of existent GPU VA mappings with the ones that are requested to
 * be mapped or unmapped. This feature is required by the Vulkan API to
 * implement Vulkan 'Sparse Memory Bindings' - driver UAPIs often refer to this
 * as VM BIND.
 *
 * Drivers can call drm_gpuva_sm_map() to receive a sequence of callbacks
 * containing map, unmap and remap operations for a given newly requested
 * mapping. The sequence of callbacks represents the set of operations to
 * execute in order to integrate the new mapping cleanly into the current state
 * of the GPU VA space.
 *
 * Depending on how the new GPU VA mapping intersects with the existent mappings
 * of the GPU VA space the &drm_gpuva_fn_ops callbacks contain an arbitrary
 * amount of unmap operations, a maximum of two remap operations and a single
 * map operation. The caller might receive no callback at all if no operation is
 * required, e.g. if the requested mapping already exists in the exact same way.
 *
 * The single map operation represents the original map operation requested by
 * the caller.
 *
 * &drm_gpuva_op_unmap contains a 'keep' field, which indicates whether the
 * &drm_gpuva to unmap is physically contiguous with the original mapping
 * request. Optionally, if 'keep' is set, drivers may keep the actual page table
 * entries for this &drm_gpuva, adding the missing page table entries only and
 * update the &drm_gpuva_manager's view of things accordingly.
 *
 * Drivers may do the same optimization, namely delta page table updates, also
 * for remap operations. This is possible since &drm_gpuva_op_remap consists of
 * one unmap operation and one or two map operations, such that drivers can
 * derive the page table update delta accordingly.
 *
 * Note that there can't be more than two existent mappings to split up, one at
 * the beginning and one at the end of the new mapping, hence there is a
 * maximum of two remap operations.
 *
 * Analogous to drm_gpuva_sm_map(), drm_gpuva_sm_unmap() uses &drm_gpuva_fn_ops
 * to call back into the driver in order to unmap a range of GPU VA space. The
 * logic behind this function is way simpler though: For all existent mappings
 * enclosed by the given range unmap operations are created. For mappings which
 * are only partially located within the given range, remap operations are
 * created such that those mappings are split up and re-mapped partially.
 *
 * As an alternative to drm_gpuva_sm_map() and drm_gpuva_sm_unmap(),
 * drm_gpuva_sm_map_ops_create() and drm_gpuva_sm_unmap_ops_create() can be used
 * to directly obtain an instance of struct drm_gpuva_ops containing a list of
 * &drm_gpuva_op, which can be iterated with drm_gpuva_for_each_op(). This list
 * contains the &drm_gpuva_ops analogous to the callbacks one would receive when
 * calling drm_gpuva_sm_map() or drm_gpuva_sm_unmap(). While this way requires
 * more memory (to allocate the &drm_gpuva_ops), it provides drivers a way to
 * iterate the &drm_gpuva_op multiple times, e.g. once in a context where memory
 * allocations are possible (e.g. to allocate GPU page tables) and once in the
 * dma-fence signalling critical path.
 *
 * To update the &drm_gpuva_manager's view of the GPU VA space
 * drm_gpuva_insert() and drm_gpuva_remove() may be used. These functions can
 * safely be used from &drm_gpuva_fn_ops callbacks originating from
 * drm_gpuva_sm_map() or drm_gpuva_sm_unmap(). However, it might be more
 * convenient to use the provided helper functions drm_gpuva_map(),
 * drm_gpuva_remap() and drm_gpuva_unmap() instead.
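 *
 * A minimal sketch (driver function names hypothetical) of iterating such a
 * pre-allocated ops list in two stages, as described above::
 *
 *      ops = drm_gpuva_sm_map_ops_create(mgr, addr, range, obj, offset);
 *      if (IS_ERR(ops))
 *              return PTR_ERR(ops);
 *
 *      // First iteration: memory allocations are still possible, e.g. to
 *      // pre-allocate GPU page tables.
 *      drm_gpuva_for_each_op(op, ops)
 *              driver_prepare_op(op);
 *
 *      // Second iteration: dma-fence signalling critical path.
 *      drm_gpuva_for_each_op(op, ops)
 *              driver_commit_op(op);
 *
 *      drm_gpuva_ops_free(mgr, ops);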
 *
 * The following diagram depicts the basic relationships of existent GPU VA
 * mappings, a newly requested mapping and the resulting mappings as implemented
 * by drm_gpuva_sm_map() - it doesn't cover any arbitrary combinations of these.
 *
 * 1) Requested mapping is identical. Replace it, but indicate the backing PTEs
 *    could be kept.
 *
 *    ::
 *
 *           0     a     1
 *      old: |-----------| (bo_offset=n)
 *
 *           0     a     1
 *      req: |-----------| (bo_offset=n)
 *
 *           0     a     1
 *      new: |-----------| (bo_offset=n)
 *
 *
 * 2) Requested mapping is identical, except for the BO offset, hence replace
 *    the mapping.
 *
 *    ::
 *
 *           0     a     1
 *      old: |-----------| (bo_offset=n)
 *
 *           0     a     1
 *      req: |-----------| (bo_offset=m)
 *
 *           0     a     1
 *      new: |-----------| (bo_offset=m)
 *
 *
 * 3) Requested mapping is identical, except for the backing BO, hence replace
 *    the mapping.
 *
 *    ::
 *
 *           0     a     1
 *      old: |-----------| (bo_offset=n)
 *
 *           0     b     1
 *      req: |-----------| (bo_offset=n)
 *
 *           0     b     1
 *      new: |-----------| (bo_offset=n)
 *
 *
 * 4) Existent mapping is a left aligned subset of the requested one, hence
 *    replace the existent one.
 *
 *    ::
 *
 *           0  a  1
 *      old: |-----|       (bo_offset=n)
 *
 *           0     a     2
 *      req: |-----------| (bo_offset=n)
 *
 *           0     a     2
 *      new: |-----------| (bo_offset=n)
 *
 *    .. note::
 *       We expect to see the same result for a request with a different BO
 *       and/or non-contiguous BO offset.
 *
 *
 * 5) Requested mapping's range is a left aligned subset of the existent one,
 *    but backed by a different BO.
 *    Hence, map the requested mapping and split the existent one, adjusting
 *    its BO offset.
 *
 *    ::
 *
 *           0     a     2
 *      old: |-----------| (bo_offset=n)
 *
 *           0  b  1
 *      req: |-----|       (bo_offset=n)
 *
 *           0  b  1  a' 2
 *      new: |-----|-----| (b.bo_offset=n, a'.bo_offset=n+1)
 *
 *    .. note::
 *       We expect to see the same result for a request with a different BO
 *       and/or non-contiguous BO offset.
 *
 *
 * 6) Existent mapping is a superset of the requested mapping. Split it up, but
 *    indicate that the backing PTEs could be kept.
 *
 *    ::
 *
 *           0     a     2
 *      old: |-----------| (bo_offset=n)
 *
 *           0  a  1
 *      req: |-----|       (bo_offset=n)
 *
 *           0  a  1  a' 2
 *      new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1)
 *
 *
 * 7) Requested mapping's range is a right aligned subset of the existent one,
 *    but backed by a different BO. Hence, map the requested mapping and split
 *    the existent one, without adjusting the BO offset.
 *
 *    ::
 *
 *           0     a     2
 *      old: |-----------| (bo_offset=n)
 *
 *                 1  b  2
 *      req:       |-----| (bo_offset=m)
 *
 *           0  a  1  b  2
 *      new: |-----|-----| (a.bo_offset=n, b.bo_offset=m)
 *
 *
 * 8) Existent mapping is a superset of the requested mapping. Split it up, but
 *    indicate that the backing PTEs could be kept.
 *
 *    ::
 *
 *           0     a     2
 *      old: |-----------| (bo_offset=n)
 *
 *                 1  a  2
 *      req:       |-----| (bo_offset=n+1)
 *
 *           0  a' 1  a  2
 *      new: |-----|-----| (a'.bo_offset=n, a.bo_offset=n+1)
 *
 *
 * 9) Existent mapping is overlapped at the end by the requested mapping backed
 *    by a different BO. Hence, map the requested mapping and split up the
 *    existent one, without adjusting the BO offset.
 *
 *    ::
 *
 *           0     a     2
 *      old: |-----------|       (bo_offset=n)
 *
 *                 1     b     3
 *      req:       |-----------| (bo_offset=m)
 *
 *           0  a  1     b     3
 *      new: |-----|-----------| (a.bo_offset=n, b.bo_offset=m)
 *
 *
 * 10) Existent mapping is overlapped by the requested mapping, both having the
 *     same backing BO with a contiguous offset. Indicate the backing PTEs of
 *     the old mapping could be kept.
 *
 *     ::
 *
 *           0     a     2
 *      old: |-----------|       (bo_offset=n)
 *
 *                 1     a     3
 *      req:       |-----------| (bo_offset=n+1)
 *
 *           0  a' 1     a     3
 *      new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1)
 *
 *
 * 11) Requested mapping's range is a centered subset of the existent one
 *     having a different backing BO. Hence, map the requested mapping and split
 *     up the existent one in two mappings, adjusting the BO offset of the right
 *     one accordingly.
 *
 *     ::
 *
 *           0        a        3
 *      old: |-----------------| (bo_offset=n)
 *
 *                 1  b  2
 *      req:       |-----|       (bo_offset=m)
 *
 *           0  a  1  b  2  a' 3
 *      new: |-----|-----|-----| (a.bo_offset=n, b.bo_offset=m, a'.bo_offset=n+2)
 *
 *
 * 12) Requested mapping is a contiguous subset of the existent one. Split it
 *     up, but indicate that the backing PTEs could be kept.
 *
 *     ::
 *
 *           0        a        3
 *      old: |-----------------| (bo_offset=n)
 *
 *                 1  a  2
 *      req:       |-----|       (bo_offset=n+1)
 *
 *           0  a' 1  a  2  a'' 3
 *      new: |-----|-----|-----| (a'.bo_offset=n, a.bo_offset=n+1, a''.bo_offset=n+2)
 *
 *
 * 13) Existent mapping is a right aligned subset of the requested one, hence
 *     replace the existent one.
 *
 *     ::
 *
 *                 1  a  2
 *      old:       |-----| (bo_offset=n+1)
 *
 *           0     a     2
 *      req: |-----------| (bo_offset=n)
 *
 *           0     a     2
 *      new: |-----------| (bo_offset=n)
 *
 *     .. note::
 *        We expect to see the same result for a request with a different bo
 *        and/or non-contiguous bo_offset.
 *
 *
 * 14) Existent mapping is a centered subset of the requested one, hence
 *     replace the existent one.
 *
 *     ::
 *
 *                 1  a  2
 *      old:       |-----|       (bo_offset=n+1)
 *
 *           0        a        3
 *      req: |-----------------| (bo_offset=n)
 *
 *           0        a        3
 *      new: |-----------------| (bo_offset=n)
 *
 *     .. note::
 *        We expect to see the same result for a request with a different bo
 *        and/or non-contiguous bo_offset.
 *
 *
 * 15) Existent mapping is overlapped at the beginning by the requested mapping
 *     backed by a different BO. Hence, map the requested mapping and split up
 *     the existent one, adjusting its BO offset accordingly.
 *
 *     ::
 *
 *                 1     a     3
 *      old:       |-----------| (bo_offset=n)
 *
 *           0     b     2
 *      req: |-----------|       (bo_offset=m)
 *
 *           0     b     2  a' 3
 *      new: |-----------|-----| (b.bo_offset=m, a'.bo_offset=n+2)
 */

/**
 * DOC: Locking
 *
 * Generally, the GPU VA manager does not take care of locking itself, it is
 * the driver's responsibility to take care of locking. Drivers might want to
 * protect the following operations: inserting, removing and iterating
 * &drm_gpuva objects as well as generating all kinds of operations, such as
 * split / merge or prefetch.
 *
 * The GPU VA manager also does not take care of the locking of the backing
 * &drm_gem_object buffers' GPU VA lists by itself; drivers are responsible to
 * enforce mutual exclusion using either the GEM's dma_resv lock or
 * alternatively a driver specific external lock.
 * For the latter see also drm_gem_gpuva_set_lock().
 *
 * However, the GPU VA manager contains lockdep checks to ensure callers of its
 * API hold the corresponding lock whenever the &drm_gem_object's GPU VA list
 * is accessed by functions such as drm_gpuva_link() or drm_gpuva_unlink().
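 *
 * A minimal sketch of linking a &drm_gpuva while holding the backing GEM's
 * dma_resv lock (error handling omitted)::
 *
 *      dma_resv_lock(obj->resv, NULL);
 *      drm_gpuva_link(va);
 *      dma_resv_unlock(obj->resv);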
 */

/**
 * DOC: Examples
 *
 * This section gives a few examples of how to let the DRM GPUVA Manager
 * generate &drm_gpuva_op in order to satisfy a given map or unmap request and
 * how to make use of them.
 *
 * The below code is strictly limited to illustrate the generic usage pattern.
 * To maintain simplicity, it doesn't make use of any abstractions for common
 * code, different (asynchronous) stages with fence signalling critical paths,
 * any other helpers or error handling in terms of freeing memory and dropping
 * previously taken locks.
 *
 * 1) Obtain a list of &drm_gpuva_op to create a new mapping::
 *
 *      // Allocates a new &drm_gpuva.
 *      struct drm_gpuva * driver_gpuva_alloc(void);
 *
 *      // Typically drivers would embed the &drm_gpuva_manager and &drm_gpuva
 *      // structure in individual driver structures and lock the dma-resv with
 *      // drm_exec or similar helpers.
 *      int driver_mapping_create(struct drm_gpuva_manager *mgr,
 *                                u64 addr, u64 range,
 *                                struct drm_gem_object *obj, u64 offset)
 *      {
 *              struct drm_gpuva_ops *ops;
 *              struct drm_gpuva_op *op;
 *
 *              driver_lock_va_space();
 *              ops = drm_gpuva_sm_map_ops_create(mgr, addr, range,
 *                                                obj, offset);
 *              if (IS_ERR(ops))
 *                      return PTR_ERR(ops);
 *
 *              drm_gpuva_for_each_op(op, ops) {
 *                      struct drm_gpuva *va;
 *
 *                      switch (op->op) {
 *                      case DRM_GPUVA_OP_MAP:
 *                              va = driver_gpuva_alloc();
 *                              if (!va)
 *                                      ; // unwind previous VA space updates,
 *                                        // free memory and unlock
 *
 *                              driver_vm_map();
 *                              drm_gpuva_map(mgr, va, &op->map);
 *                              drm_gpuva_link(va);
 *
 *                              break;
 *                      case DRM_GPUVA_OP_REMAP: {
 *                              struct drm_gpuva *prev = NULL, *next = NULL;
 *
 *                              va = op->remap.unmap->va;
 *
 *                              if (op->remap.prev) {
 *                                      prev = driver_gpuva_alloc();
 *                                      if (!prev)
 *                                              ; // unwind previous VA space
 *                                                // updates, free memory and
 *                                                // unlock
 *                              }
 *
 *                              if (op->remap.next) {
 *                                      next = driver_gpuva_alloc();
 *                                      if (!next)
 *                                              ; // unwind previous VA space
 *                                                // updates, free memory and
 *                                                // unlock
 *                              }
 *
 *                              driver_vm_remap();
 *                              drm_gpuva_remap(prev, next, &op->remap);
 *
 *                              drm_gpuva_unlink(va);
 *                              if (prev)
 *                                      drm_gpuva_link(prev);
 *                              if (next)
 *                                      drm_gpuva_link(next);
 *
 *                              break;
 *                      }
 *                      case DRM_GPUVA_OP_UNMAP:
 *                              va = op->unmap.va;
 *
 *                              driver_vm_unmap();
 *                              drm_gpuva_unlink(va);
 *                              drm_gpuva_unmap(&op->unmap);
 *
 *                              break;
 *                      default:
 *                              break;
 *                      }
 *              }
 *              driver_unlock_va_space();
 *
 *              return 0;
 *      }
 *
 * 2) Receive a callback for each &drm_gpuva_op to create a new mapping::
 *
 *      struct driver_context {
 *              struct drm_gpuva_manager *mgr;
 *              struct drm_gpuva *new_va;
 *              struct drm_gpuva *prev_va;
 *              struct drm_gpuva *next_va;
 *      };
 *
 *      // ops to pass to drm_gpuva_manager_init()
 *      static const struct drm_gpuva_fn_ops driver_gpuva_ops = {
 *              .sm_step_map = driver_gpuva_map,
 *              .sm_step_remap = driver_gpuva_remap,
 *              .sm_step_unmap = driver_gpuva_unmap,
 *      };
 *
 *      // Typically drivers would embed the &drm_gpuva_manager and &drm_gpuva
 *      // structure in individual driver structures and lock the dma-resv with
 *      // drm_exec or similar helpers.
 *      int driver_mapping_create(struct drm_gpuva_manager *mgr,
 *                                u64 addr, u64 range,
 *                                struct drm_gem_object *obj, u64 offset)
 *      {
 *              struct driver_context ctx;
 *              struct drm_gpuva_ops *ops;
 *              struct drm_gpuva_op *op;
 *              int ret = 0;
 *
 *              ctx.mgr = mgr;
 *
 *              ctx.new_va = kzalloc(sizeof(*ctx.new_va), GFP_KERNEL);
 *              ctx.prev_va = kzalloc(sizeof(*ctx.prev_va), GFP_KERNEL);
 *              ctx.next_va = kzalloc(sizeof(*ctx.next_va), GFP_KERNEL);
 *              if (!ctx.new_va || !ctx.prev_va || !ctx.next_va) {
 *                      ret = -ENOMEM;
 *                      goto out;
 *              }
 *
 *              driver_lock_va_space();
 *              ret = drm_gpuva_sm_map(mgr, &ctx, addr, range, obj, offset);
 *              driver_unlock_va_space();
 *
 *      out:
 *              kfree(ctx.new_va);
 *              kfree(ctx.prev_va);
 *              kfree(ctx.next_va);
 *              return ret;
 *      }
 *
 *      int driver_gpuva_map(struct drm_gpuva_op *op, void *__ctx)
 *      {
 *              struct driver_context *ctx = __ctx;
 *
 *              drm_gpuva_map(ctx->mgr, ctx->new_va, &op->map);
 *
 *              drm_gpuva_link(ctx->new_va);
 *
 *              // prevent the new GPUVA from being freed in
 *              // driver_mapping_create()
 *              ctx->new_va = NULL;
 *
 *              return 0;
 *      }
 *
 *      int driver_gpuva_remap(struct drm_gpuva_op *op, void *__ctx)
 *      {
 *              struct driver_context *ctx = __ctx;
 *
 *              drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap);
 *
 *              drm_gpuva_unlink(op->remap.unmap->va);
 *              kfree(op->remap.unmap->va);
 *
 *              if (op->remap.prev) {
 *                      drm_gpuva_link(ctx->prev_va);
 *                      ctx->prev_va = NULL;
 *              }
 *
 *              if (op->remap.next) {
 *                      drm_gpuva_link(ctx->next_va);
 *                      ctx->next_va = NULL;
 *              }
 *
 *              return 0;
 *      }
 *
 *      int driver_gpuva_unmap(struct drm_gpuva_op *op, void *__ctx)
 *      {
 *              drm_gpuva_unlink(op->unmap.va);
 *              drm_gpuva_unmap(&op->unmap);
 *              kfree(op->unmap.va);
 *
 *              return 0;
 *      }
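 *
 * 3) Unmap a range of GPU VA space (a minimal sketch, subject to the same
 *    simplifications as the examples above)::
 *
 *      int driver_mapping_unmap(struct drm_gpuva_manager *mgr,
 *                               u64 addr, u64 range)
 *      {
 *              struct drm_gpuva_ops *ops;
 *              struct drm_gpuva_op *op;
 *
 *              driver_lock_va_space();
 *              ops = drm_gpuva_sm_unmap_ops_create(mgr, addr, range);
 *              if (IS_ERR(ops))
 *                      return PTR_ERR(ops);
 *
 *              drm_gpuva_for_each_op(op, ops) {
 *                      switch (op->op) {
 *                      case DRM_GPUVA_OP_REMAP:
 *                              // split mappings only partially located within
 *                              // the given range; see example 1) for how prev
 *                              // and next could be handled
 *                              break;
 *                      case DRM_GPUVA_OP_UNMAP:
 *                              driver_vm_unmap();
 *                              drm_gpuva_unlink(op->unmap.va);
 *                              drm_gpuva_unmap(&op->unmap);
 *                              kfree(op->unmap.va);
 *                              break;
 *                      default:
 *                              break;
 *                      }
 *              }
 *              driver_unlock_va_space();
 *
 *              drm_gpuva_ops_free(mgr, ops);
 *              return 0;
 *      }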
 */

#define to_drm_gpuva(__node) container_of((__node), struct drm_gpuva, rb.node)

#define GPUVA_START(node) ((node)->va.addr)
#define GPUVA_LAST(node) ((node)->va.addr + (node)->va.range - 1)

/* We do not actually use drm_gpuva_it_next(); tell the compiler not to
 * complain about this.
 */
INTERVAL_TREE_DEFINE(struct drm_gpuva, rb.node, u64, rb.__subtree_last,
                     GPUVA_START, GPUVA_LAST, static __maybe_unused,
                     drm_gpuva_it)

static int __drm_gpuva_insert(struct drm_gpuva_manager *mgr,
                              struct drm_gpuva *va);
static void __drm_gpuva_remove(struct drm_gpuva *va);

static bool
drm_gpuva_check_overflow(u64 addr, u64 range)
{
        u64 end;

        return WARN(check_add_overflow(addr, range, &end),
                    "GPUVA address limited to %zu bytes.\n", sizeof(end));
}

static bool
drm_gpuva_in_mm_range(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
{
        u64 end = addr + range;
        u64 mm_start = mgr->mm_start;
        u64 mm_end = mm_start + mgr->mm_range;

        return addr >= mm_start && end <= mm_end;
}

static bool
drm_gpuva_in_kernel_node(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
{
        u64 end = addr + range;
        u64 kstart = mgr->kernel_alloc_node.va.addr;
        u64 krange = mgr->kernel_alloc_node.va.range;
        u64 kend = kstart + krange;

        return krange && addr < kend && kstart < end;
}

static bool
drm_gpuva_range_valid(struct drm_gpuva_manager *mgr,
                      u64 addr, u64 range)
{
        return !drm_gpuva_check_overflow(addr, range) &&
               drm_gpuva_in_mm_range(mgr, addr, range) &&
               !drm_gpuva_in_kernel_node(mgr, addr, range);
}

/**
 * drm_gpuva_manager_init() - initialize a &drm_gpuva_manager
 * @mgr: pointer to the &drm_gpuva_manager to initialize
 * @name: the name of the GPU VA space
 * @start_offset: the start offset of the GPU VA space
 * @range: the size of the GPU VA space
 * @reserve_offset: the start of the kernel reserved GPU VA area
 * @reserve_range: the size of the kernel reserved GPU VA area
 * @ops: &drm_gpuva_fn_ops called on drm_gpuva_sm_map() / drm_gpuva_sm_unmap()
 *
 * The &drm_gpuva_manager must be initialized with this function before use.
 *
 * Note that @mgr must be cleared to 0 before calling this function.
 * The given &name is expected to be managed by the surrounding driver
 * structures.
 */
void
drm_gpuva_manager_init(struct drm_gpuva_manager *mgr,
                       const char *name,
                       u64 start_offset, u64 range,
                       u64 reserve_offset, u64 reserve_range,
                       const struct drm_gpuva_fn_ops *ops)
{
        mgr->rb.tree = RB_ROOT_CACHED;
        INIT_LIST_HEAD(&mgr->rb.list);

        drm_gpuva_check_overflow(start_offset, range);
        mgr->mm_start = start_offset;
        mgr->mm_range = range;

        mgr->name = name ? name : "unknown";
        mgr->ops = ops;

        memset(&mgr->kernel_alloc_node, 0, sizeof(struct drm_gpuva));

        if (reserve_range) {
                mgr->kernel_alloc_node.va.addr = reserve_offset;
                mgr->kernel_alloc_node.va.range = reserve_range;

                if (likely(!drm_gpuva_check_overflow(reserve_offset,
                                                     reserve_range)))
                        __drm_gpuva_insert(mgr, &mgr->kernel_alloc_node);
        }
}
EXPORT_SYMBOL_GPL(drm_gpuva_manager_init);
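
/*
 * Example (names and values hypothetical): a driver managing a 48-bit VA
 * space with a 4 KiB kernel reserved node at its base might call:
 *
 *      drm_gpuva_manager_init(&vm->mgr, "example-vm",
 *                             0, 1ull << 48,
 *                             0, SZ_4K,
 *                             &driver_gpuva_ops);
 */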

/**
 * drm_gpuva_manager_destroy() - cleanup a &drm_gpuva_manager
 * @mgr: pointer to the &drm_gpuva_manager to clean up
 *
 * Note that it is a bug to call this function on a manager that still
 * holds GPU VA mappings.
 */
void
drm_gpuva_manager_destroy(struct drm_gpuva_manager *mgr)
{
        mgr->name = NULL;

        if (mgr->kernel_alloc_node.va.range)
                __drm_gpuva_remove(&mgr->kernel_alloc_node);

        WARN(!RB_EMPTY_ROOT(&mgr->rb.tree.rb_root),
             "GPUVA tree is not empty, potentially leaking memory.");
}
EXPORT_SYMBOL_GPL(drm_gpuva_manager_destroy);

static int
__drm_gpuva_insert(struct drm_gpuva_manager *mgr,
                   struct drm_gpuva *va)
{
        struct rb_node *node;
        struct list_head *head;

        if (drm_gpuva_it_iter_first(&mgr->rb.tree,
                                    GPUVA_START(va),
                                    GPUVA_LAST(va)))
                return -EEXIST;

        va->mgr = mgr;

        drm_gpuva_it_insert(va, &mgr->rb.tree);

        node = rb_prev(&va->rb.node);
        if (node)
                head = &(to_drm_gpuva(node))->rb.entry;
        else
                head = &mgr->rb.list;

        list_add(&va->rb.entry, head);

        return 0;
}

/**
 * drm_gpuva_insert() - insert a &drm_gpuva
 * @mgr: the &drm_gpuva_manager to insert the &drm_gpuva in
 * @va: the &drm_gpuva to insert
 *
 * Insert a &drm_gpuva with a given address and range into a
 * &drm_gpuva_manager.
 *
 * It is safe to use this function while iterating the GPU VA space with the
 * safe iterator variants drm_gpuva_for_each_va_safe() and
 * drm_gpuva_for_each_va_range_safe().
 *
 * Returns: 0 on success, negative error code on failure.
 */
int
drm_gpuva_insert(struct drm_gpuva_manager *mgr,
                 struct drm_gpuva *va)
{
        u64 addr = va->va.addr;
        u64 range = va->va.range;

        if (unlikely(!drm_gpuva_range_valid(mgr, addr, range)))
                return -EINVAL;

        return __drm_gpuva_insert(mgr, va);
}
EXPORT_SYMBOL_GPL(drm_gpuva_insert);

static void
__drm_gpuva_remove(struct drm_gpuva *va)
{
        drm_gpuva_it_remove(va, &va->mgr->rb.tree);
        list_del_init(&va->rb.entry);
}

/**
 * drm_gpuva_remove() - remove a &drm_gpuva
 * @va: the &drm_gpuva to remove
 *
 * This removes the given &va from the underlying tree.
 *
 * It is safe to use this function while iterating the GPU VA space with the
 * safe iterator variants drm_gpuva_for_each_va_safe() and
 * drm_gpuva_for_each_va_range_safe().
 */
void
drm_gpuva_remove(struct drm_gpuva *va)
{
        struct drm_gpuva_manager *mgr = va->mgr;

        if (unlikely(va == &mgr->kernel_alloc_node)) {
                WARN(1, "Can't destroy kernel reserved node.\n");
                return;
        }

        __drm_gpuva_remove(va);
}
EXPORT_SYMBOL_GPL(drm_gpuva_remove);
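
/*
 * Example (mapping values hypothetical): a driver pre-initializing and
 * inserting a &drm_gpuva might do:
 *
 *      va->va.addr = addr;
 *      va->va.range = range;
 *      va->gem.obj = obj;
 *      va->gem.offset = offset;
 *
 *      ret = drm_gpuva_insert(mgr, va);
 */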

/**
 * drm_gpuva_link() - link a &drm_gpuva
 * @va: the &drm_gpuva to link
 *
 * This adds the given &va to the GPU VA list of the &drm_gem_object it is
 * associated with.
 *
 * This function expects the caller to protect the GEM's GPUVA list against
 * concurrent access using the GEM's dma_resv lock.
 */
void
drm_gpuva_link(struct drm_gpuva *va)
{
        struct drm_gem_object *obj = va->gem.obj;

        if (unlikely(!obj))
                return;

        drm_gem_gpuva_assert_lock_held(obj);

        list_add_tail(&va->gem.entry, &obj->gpuva.list);
}
EXPORT_SYMBOL_GPL(drm_gpuva_link);

/**
 * drm_gpuva_unlink() - unlink a &drm_gpuva
 * @va: the &drm_gpuva to unlink
 *
 * This removes the given &va from the GPU VA list of the &drm_gem_object it is
 * associated with.
 *
 * This function expects the caller to protect the GEM's GPUVA list against
 * concurrent access using the GEM's dma_resv lock.
 */
void
drm_gpuva_unlink(struct drm_gpuva *va)
{
        struct drm_gem_object *obj = va->gem.obj;

        if (unlikely(!obj))
                return;

        drm_gem_gpuva_assert_lock_held(obj);

        list_del_init(&va->gem.entry);
}
EXPORT_SYMBOL_GPL(drm_gpuva_unlink);

/**
 * drm_gpuva_find_first() - find the first &drm_gpuva in the given range
 * @mgr: the &drm_gpuva_manager to search in
 * @addr: the &drm_gpuva's address
 * @range: the &drm_gpuva's range
 *
 * Returns: the first &drm_gpuva within the given range
 */
struct drm_gpuva *
drm_gpuva_find_first(struct drm_gpuva_manager *mgr,
                     u64 addr, u64 range)
{
        u64 last = addr + range - 1;

        return drm_gpuva_it_iter_first(&mgr->rb.tree, addr, last);
}
EXPORT_SYMBOL_GPL(drm_gpuva_find_first);

/**
 * drm_gpuva_find() - find a &drm_gpuva
 * @mgr: the &drm_gpuva_manager to search in
 * @addr: the &drm_gpuva's address
 * @range: the &drm_gpuva's range
 *
 * Returns: the &drm_gpuva at a given @addr and with a given @range
 */
struct drm_gpuva *
drm_gpuva_find(struct drm_gpuva_manager *mgr,
               u64 addr, u64 range)
{
        struct drm_gpuva *va;

        va = drm_gpuva_find_first(mgr, addr, range);
        if (!va)
                goto out;

        if (va->va.addr != addr ||
            va->va.range != range)
                goto out;

        return va;

out:
        return NULL;
}
EXPORT_SYMBOL_GPL(drm_gpuva_find);
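
/*
 * A short illustration of the difference between the two lookup helpers
 * above:
 *
 *      // exact match of both address and range required
 *      va = drm_gpuva_find(mgr, addr, range);
 *
 *      // any mapping intersecting [addr, addr + range) is returned
 *      va = drm_gpuva_find_first(mgr, addr, range);
 */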

/**
 * drm_gpuva_find_prev() - find the &drm_gpuva before the given address
 * @mgr: the &drm_gpuva_manager to search in
 * @start: the given GPU VA's start address
 *
 * Find the adjacent &drm_gpuva before the GPU VA with given @start address.
 *
 * Note that if there is any free space between the GPU VA mappings no mapping
 * is returned.
 *
 * Returns: a pointer to the found &drm_gpuva or NULL if none was found
 */
struct drm_gpuva *
drm_gpuva_find_prev(struct drm_gpuva_manager *mgr, u64 start)
{
        if (!drm_gpuva_range_valid(mgr, start - 1, 1))
                return NULL;

        return drm_gpuva_it_iter_first(&mgr->rb.tree, start - 1, start);
}
EXPORT_SYMBOL_GPL(drm_gpuva_find_prev);

/**
 * drm_gpuva_find_next() - find the &drm_gpuva after the given address
 * @mgr: the &drm_gpuva_manager to search in
 * @end: the given GPU VA's end address
 *
 * Find the adjacent &drm_gpuva after the GPU VA with given @end address.
 *
 * Note that if there is any free space between the GPU VA mappings no mapping
 * is returned.
 *
 * Returns: a pointer to the found &drm_gpuva or NULL if none was found
 */
struct drm_gpuva *
drm_gpuva_find_next(struct drm_gpuva_manager *mgr, u64 end)
{
        if (!drm_gpuva_range_valid(mgr, end, 1))
                return NULL;

        return drm_gpuva_it_iter_first(&mgr->rb.tree, end, end + 1);
}
EXPORT_SYMBOL_GPL(drm_gpuva_find_next);

/**
 * drm_gpuva_interval_empty() - indicate whether a given interval of the VA space
 * is empty
 * @mgr: the &drm_gpuva_manager to check the range for
 * @addr: the start address of the range
 * @range: the range of the interval
 *
 * Returns: true if the interval is empty, false otherwise
 */
bool
drm_gpuva_interval_empty(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
{
        return !drm_gpuva_find_first(mgr, addr, range);
}
EXPORT_SYMBOL_GPL(drm_gpuva_interval_empty);

/**
 * drm_gpuva_map() - helper to insert a &drm_gpuva according to a
 * &drm_gpuva_op_map
 * @mgr: the &drm_gpuva_manager
 * @va: the &drm_gpuva to insert
 * @op: the &drm_gpuva_op_map to initialize @va with
 *
 * Initializes the @va from the @op and inserts it into the given @mgr.
 */
void
drm_gpuva_map(struct drm_gpuva_manager *mgr,
              struct drm_gpuva *va,
              struct drm_gpuva_op_map *op)
{
        drm_gpuva_init_from_op(va, op);
        drm_gpuva_insert(mgr, va);
}
EXPORT_SYMBOL_GPL(drm_gpuva_map);

/**
 * drm_gpuva_remap() - helper to remap a &drm_gpuva according to a
 * &drm_gpuva_op_remap
 * @prev: the &drm_gpuva to remap when keeping the start of a mapping
 * @next: the &drm_gpuva to remap when keeping the end of a mapping
 * @op: the &drm_gpuva_op_remap to initialize @prev and @next with
 *
 * Removes the currently mapped &drm_gpuva and remaps it using @prev and/or
 * @next.
 */
void
drm_gpuva_remap(struct drm_gpuva *prev,
                struct drm_gpuva *next,
                struct drm_gpuva_op_remap *op)
{
        struct drm_gpuva *curr = op->unmap->va;
        struct drm_gpuva_manager *mgr = curr->mgr;

        drm_gpuva_remove(curr);

        if (op->prev) {
                drm_gpuva_init_from_op(prev, op->prev);
                drm_gpuva_insert(mgr, prev);
        }

        if (op->next) {
                drm_gpuva_init_from_op(next, op->next);
                drm_gpuva_insert(mgr, next);
        }
}
EXPORT_SYMBOL_GPL(drm_gpuva_remap);

/**
 * drm_gpuva_unmap() - helper to remove a &drm_gpuva according to a
 * &drm_gpuva_op_unmap
 * @op: the &drm_gpuva_op_unmap specifying the &drm_gpuva to remove
 *
 * Removes the &drm_gpuva associated with the &drm_gpuva_op_unmap.
 */
void
drm_gpuva_unmap(struct drm_gpuva_op_unmap *op)
{
        drm_gpuva_remove(op->va);
}
EXPORT_SYMBOL_GPL(drm_gpuva_unmap);

static int
op_map_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
          u64 addr, u64 range,
          struct drm_gem_object *obj, u64 offset)
{
        struct drm_gpuva_op op = {};

        op.op = DRM_GPUVA_OP_MAP;
        op.map.va.addr = addr;
        op.map.va.range = range;
        op.map.gem.obj = obj;
        op.map.gem.offset = offset;

        return fn->sm_step_map(&op, priv);
}

static int
op_remap_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
            struct drm_gpuva_op_map *prev,
            struct drm_gpuva_op_map *next,
            struct drm_gpuva_op_unmap *unmap)
{
        struct drm_gpuva_op op = {};
        struct drm_gpuva_op_remap *r;

        op.op = DRM_GPUVA_OP_REMAP;
        r = &op.remap;
        r->prev = prev;
        r->next = next;
        r->unmap = unmap;

        return fn->sm_step_remap(&op, priv);
}

static int
op_unmap_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
            struct drm_gpuva *va, bool merge)
{
        struct drm_gpuva_op op = {};

        op.op = DRM_GPUVA_OP_UNMAP;
        op.unmap.va = va;
        op.unmap.keep = merge;

        return fn->sm_step_unmap(&op, priv);
}
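
/*
 * For each existent mapping intersecting the requested range,
 * __drm_gpuva_sm_map() below distinguishes three cases: the mapping starts at
 * the requested address (unmap it entirely or remap it keeping only its
 * tail), it starts before the requested address (remap it keeping its head
 * and, if it extends beyond the requested range, also its tail), or it starts
 * after the requested address (unmap it entirely or remap it keeping only its
 * tail). The 'merge' flag tracks whether the existent mapping is backed by
 * the same GEM object at a contiguous offset, i.e. whether its backing PTEs
 * could be kept.
 */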

static int
op_unmap_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
	    struct drm_gpuva *va, bool merge)
{
	struct drm_gpuva_op op = {};

	op.op = DRM_GPUVA_OP_UNMAP;
	op.unmap.va = va;
	op.unmap.keep = merge;

	return fn->sm_step_unmap(&op, priv);
}

static int
__drm_gpuva_sm_map(struct drm_gpuva_manager *mgr,
		   const struct drm_gpuva_fn_ops *ops, void *priv,
		   u64 req_addr, u64 req_range,
		   struct drm_gem_object *req_obj, u64 req_offset)
{
	struct drm_gpuva *va, *next;
	u64 req_end = req_addr + req_range;
	int ret;

	if (unlikely(!drm_gpuva_range_valid(mgr, req_addr, req_range)))
		return -EINVAL;

	drm_gpuva_for_each_va_range_safe(va, next, mgr, req_addr, req_end) {
		struct drm_gem_object *obj = va->gem.obj;
		u64 offset = va->gem.offset;
		u64 addr = va->va.addr;
		u64 range = va->va.range;
		u64 end = addr + range;
		bool merge = !!va->gem.obj;

		if (addr == req_addr) {
			merge &= obj == req_obj &&
				 offset == req_offset;

			if (end == req_end) {
				ret = op_unmap_cb(ops, priv, va, merge);
				if (ret)
					return ret;
				break;
			}

			if (end < req_end) {
				ret = op_unmap_cb(ops, priv, va, merge);
				if (ret)
					return ret;
				continue;
			}

			if (end > req_end) {
				struct drm_gpuva_op_map n = {
					.va.addr = req_end,
					.va.range = range - req_range,
					.gem.obj = obj,
					.gem.offset = offset + req_range,
				};
				struct drm_gpuva_op_unmap u = {
					.va = va,
					.keep = merge,
				};

				ret = op_remap_cb(ops, priv, NULL, &n, &u);
				if (ret)
					return ret;
				break;
			}
		} else if (addr < req_addr) {
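			/*
			 * The existent mapping starts before the requested
			 * one: its head [addr, req_addr) is kept alive through
			 * a remap carrying a 'prev' entry. Merging remains
			 * possible only if the same GEM object backs the
			 * overlap at the matching offset.
			 */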
			u64 ls_range = req_addr - addr;
			struct drm_gpuva_op_map p = {
				.va.addr = addr,
				.va.range = ls_range,
				.gem.obj = obj,
				.gem.offset = offset,
			};
			struct drm_gpuva_op_unmap u = { .va = va };

			merge &= obj == req_obj &&
				 offset + ls_range == req_offset;
			u.keep = merge;

			if (end == req_end) {
				ret = op_remap_cb(ops, priv, &p, NULL, &u);
				if (ret)
					return ret;
				break;
			}

			if (end < req_end) {
				ret = op_remap_cb(ops, priv, &p, NULL, &u);
				if (ret)
					return ret;
				continue;
			}

			if (end > req_end) {
				struct drm_gpuva_op_map n = {
					.va.addr = req_end,
					.va.range = end - req_end,
					.gem.obj = obj,
					.gem.offset = offset + ls_range +
						      req_range,
				};

				ret = op_remap_cb(ops, priv, &p, &n, &u);
				if (ret)
					return ret;
				break;
			}
		} else if (addr > req_addr) {
			merge &= obj == req_obj &&
				 offset == req_offset +
					   (addr - req_addr);

			if (end == req_end) {
				ret = op_unmap_cb(ops, priv, va, merge);
				if (ret)
					return ret;
				break;
			}

			if (end < req_end) {
				ret = op_unmap_cb(ops, priv, va, merge);
				if (ret)
					return ret;
				continue;
			}

			if (end > req_end) {
				struct drm_gpuva_op_map n = {
					.va.addr = req_end,
					.va.range = end - req_end,
					.gem.obj = obj,
					.gem.offset = offset + req_end - addr,
				};
				struct drm_gpuva_op_unmap u = {
					.va = va,
					.keep = merge,
				};

				ret = op_remap_cb(ops, priv, NULL, &n, &u);
				if (ret)
					return ret;
				break;
			}
		}
	}

	return op_map_cb(ops, priv,
			 req_addr, req_range,
			 req_obj, req_offset);
}

static int
__drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr,
		     const struct drm_gpuva_fn_ops *ops, void *priv,
		     u64 req_addr, u64 req_range)
{
	struct drm_gpuva *va, *next;
	u64 req_end = req_addr + req_range;
	int ret;

	if (unlikely(!drm_gpuva_range_valid(mgr, req_addr, req_range)))
		return -EINVAL;

	drm_gpuva_for_each_va_range_safe(va, next, mgr, req_addr, req_end) {
		struct drm_gpuva_op_map prev = {}, next = {};
		bool prev_split = false, next_split = false;
		struct drm_gem_object *obj = va->gem.obj;
		u64 offset = va->gem.offset;
		u64 addr = va->va.addr;
		u64 range = va->va.range;
		u64 end = addr + range;

		if (addr < req_addr) {
			prev.va.addr = addr;
			prev.va.range = req_addr - addr;
			prev.gem.obj = obj;
			prev.gem.offset = offset;

			prev_split = true;
		}

		if (end > req_end) {
			next.va.addr = req_end;
			next.va.range = end - req_end;
			next.gem.obj = obj;
			next.gem.offset = offset + (req_end - addr);

			next_split = true;
		}

		if (prev_split || next_split) {
			struct drm_gpuva_op_unmap unmap = { .va = va };

			ret = op_remap_cb(ops, priv,
					  prev_split ? &prev : NULL,
					  next_split ? &next : NULL,
					  &unmap);
			if (ret)
				return ret;
		} else {
			ret = op_unmap_cb(ops, priv, va, false);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/**
 * drm_gpuva_sm_map() - creates the &drm_gpuva_op split/merge steps
 * @mgr: the &drm_gpuva_manager representing the GPU VA space
 * @priv: pointer to a driver private data structure
 * @req_addr: the start address of the new mapping
 * @req_range: the range of the new mapping
 * @req_obj: the &drm_gem_object to map
 * @req_offset: the offset within the &drm_gem_object
 *
 * This function iterates the given range of the GPU VA space. It utilizes the
 * &drm_gpuva_fn_ops to call back into the driver providing the split and merge
 * steps.
 *
 * Drivers may use these callbacks to update the GPU VA space right away within
 * the callback. In case the driver decides to copy and store the operations
 * for later processing, neither this function nor drm_gpuva_sm_unmap() may be
 * called again before the &drm_gpuva_manager's view of the GPU VA space has
 * been updated with the previous set of operations. To update the
 * &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
 * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
 * used.
 *
 * A sequence of callbacks can contain map, unmap and remap operations, but
 * the sequence of callbacks might also be empty if no operation is required,
 * e.g. if the requested mapping already exists in the exact same way.
 *
 * There can be an arbitrary amount of unmap operations, a maximum of two remap
 * operations and a single map operation. The latter one represents the original
 * map operation requested by the caller.
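 *
 * Calling this function requires the &drm_gpuva_manager's &drm_gpuva_fn_ops to
 * provide the &drm_gpuva_fn_ops.sm_step_map, &drm_gpuva_fn_ops.sm_step_remap
 * and &drm_gpuva_fn_ops.sm_step_unmap callbacks; otherwise -EINVAL is
 * returned. A minimal sketch, where the drv_gpuva_step_* handlers and drv_ctx
 * are placeholder driver code, could look like::
 *
 *	static const struct drm_gpuva_fn_ops drv_gpuva_ops = {
 *		.sm_step_map = drv_gpuva_step_map,
 *		.sm_step_remap = drv_gpuva_step_remap,
 *		.sm_step_unmap = drv_gpuva_step_unmap,
 *	};
 *
 *	err = drm_gpuva_sm_map(mgr, drv_ctx, req_addr, req_range,
 *			       req_obj, req_offset);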
 *
 * Returns: 0 on success or a negative error code
 */
int
drm_gpuva_sm_map(struct drm_gpuva_manager *mgr, void *priv,
		 u64 req_addr, u64 req_range,
		 struct drm_gem_object *req_obj, u64 req_offset)
{
	const struct drm_gpuva_fn_ops *ops = mgr->ops;

	if (unlikely(!(ops && ops->sm_step_map &&
		       ops->sm_step_remap &&
		       ops->sm_step_unmap)))
		return -EINVAL;

	return __drm_gpuva_sm_map(mgr, ops, priv,
				  req_addr, req_range,
				  req_obj, req_offset);
}
EXPORT_SYMBOL_GPL(drm_gpuva_sm_map);

/**
 * drm_gpuva_sm_unmap() - creates the &drm_gpuva_op split steps on unmap
 * @mgr: the &drm_gpuva_manager representing the GPU VA space
 * @priv: pointer to a driver private data structure
 * @req_addr: the start address of the range to unmap
 * @req_range: the range of the mappings to unmap
 *
 * This function iterates the given range of the GPU VA space. It utilizes the
 * &drm_gpuva_fn_ops to call back into the driver providing the operations to
 * unmap and, if required, split existent mappings.
 *
 * Drivers may use these callbacks to update the GPU VA space right away within
 * the callback. In case the driver decides to copy and store the operations
 * for later processing, neither this function nor drm_gpuva_sm_map() may be
 * called again before the &drm_gpuva_manager's view of the GPU VA space has
 * been updated with the previous set of operations. To update the
 * &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
 * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
 * used.
 *
 * A sequence of callbacks can contain unmap and remap operations, depending on
 * whether there are actual overlapping mappings to split.
 *
 * There can be an arbitrary amount of unmap operations and a maximum of two
 * remap operations.
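 *
 * For instance, unmapping the pages [1, 3) of a single existent mapping
 * spanning pages [0, 4) results in a single remap operation consisting of an
 * unmap of the original mapping, a 'prev' entry re-mapping page [0, 1) and a
 * 'next' entry re-mapping page [3, 4).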
 *
 * Returns: 0 on success or a negative error code
 */
int
drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr, void *priv,
		   u64 req_addr, u64 req_range)
{
	const struct drm_gpuva_fn_ops *ops = mgr->ops;

	if (unlikely(!(ops && ops->sm_step_remap &&
		       ops->sm_step_unmap)))
		return -EINVAL;

	return __drm_gpuva_sm_unmap(mgr, ops, priv,
				    req_addr, req_range);
}
EXPORT_SYMBOL_GPL(drm_gpuva_sm_unmap);

static struct drm_gpuva_op *
gpuva_op_alloc(struct drm_gpuva_manager *mgr)
{
	const struct drm_gpuva_fn_ops *fn = mgr->ops;
	struct drm_gpuva_op *op;

	if (fn && fn->op_alloc)
		op = fn->op_alloc();
	else
		op = kzalloc(sizeof(*op), GFP_KERNEL);

	if (unlikely(!op))
		return NULL;

	return op;
}

static void
gpuva_op_free(struct drm_gpuva_manager *mgr,
	      struct drm_gpuva_op *op)
{
	const struct drm_gpuva_fn_ops *fn = mgr->ops;

	if (fn && fn->op_free)
		fn->op_free(op);
	else
		kfree(op);
}

static int
drm_gpuva_sm_step(struct drm_gpuva_op *__op,
		  void *priv)
{
	struct {
		struct drm_gpuva_manager *mgr;
		struct drm_gpuva_ops *ops;
	} *args = priv;
	struct drm_gpuva_manager *mgr = args->mgr;
	struct drm_gpuva_ops *ops = args->ops;
	struct drm_gpuva_op *op;

	op = gpuva_op_alloc(mgr);
	if (unlikely(!op))
		goto err;

	memcpy(op, __op, sizeof(*op));

	if (op->op == DRM_GPUVA_OP_REMAP) {
		struct drm_gpuva_op_remap *__r = &__op->remap;
		struct drm_gpuva_op_remap *r = &op->remap;

		r->unmap = kmemdup(__r->unmap, sizeof(*r->unmap),
				   GFP_KERNEL);
		if (unlikely(!r->unmap))
			goto err_free_op;

		if (__r->prev) {
			r->prev = kmemdup(__r->prev, sizeof(*r->prev),
					  GFP_KERNEL);
			if (unlikely(!r->prev))
				goto err_free_unmap;
		}

		if (__r->next) {
			r->next = kmemdup(__r->next, sizeof(*r->next),
					  GFP_KERNEL);
			if (unlikely(!r->next))
				goto err_free_prev;
		}
	}

	list_add_tail(&op->entry, &ops->list);

	return 0;

err_free_prev:
	kfree(op->remap.prev);
err_free_unmap:
	kfree(op->remap.unmap);
err_free_op:
	gpuva_op_free(mgr, op);
err:
	return -ENOMEM;
}

static const struct drm_gpuva_fn_ops gpuva_list_ops = {
	.sm_step_map = drm_gpuva_sm_step,
	.sm_step_remap = drm_gpuva_sm_step,
	.sm_step_unmap = drm_gpuva_sm_step,
};

/**
 * drm_gpuva_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
 * @mgr: the &drm_gpuva_manager representing the GPU VA space
 * @req_addr: the start address of the new mapping
 * @req_range: the range of the new mapping
 * @req_obj: the &drm_gem_object to map
 * @req_offset: the offset within the &drm_gem_object
 *
 * This function creates a list of operations to perform splitting and merging
 * of existent mapping(s) with the newly requested one.
 *
 * The list can be iterated with &drm_gpuva_for_each_op and must be processed
 * in the given order. It can contain map, unmap and remap operations, but it
 * also can be empty if no operation is required, e.g. if the requested mapping
 * already exists in the exact same way.
 *
 * There can be an arbitrary amount of unmap operations, a maximum of two remap
 * operations and a single map operation. The latter one represents the original
 * map operation requested by the caller.
 *
 * Note that before calling this function again with another mapping request it
 * is necessary to update the &drm_gpuva_manager's view of the GPU VA space.
 * The previously obtained operations must be either processed or abandoned.
 * To update the &drm_gpuva_manager's view of the GPU VA space
 * drm_gpuva_insert(), drm_gpuva_destroy_locked() and/or
 * drm_gpuva_destroy_unlocked() should be used.
 *
 * After the caller finished processing the returned &drm_gpuva_ops, they must
 * be freed with drm_gpuva_ops_free().
 *
 * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
 */
struct drm_gpuva_ops *
drm_gpuva_sm_map_ops_create(struct drm_gpuva_manager *mgr,
			    u64 req_addr, u64 req_range,
			    struct drm_gem_object *req_obj, u64 req_offset)
{
	struct drm_gpuva_ops *ops;
	struct {
		struct drm_gpuva_manager *mgr;
		struct drm_gpuva_ops *ops;
	} args;
	int ret;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (unlikely(!ops))
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->list);

	args.mgr = mgr;
	args.ops = ops;

	ret = __drm_gpuva_sm_map(mgr, &gpuva_list_ops, &args,
				 req_addr, req_range,
				 req_obj, req_offset);
	if (ret)
		goto err_free_ops;

	return ops;

err_free_ops:
	drm_gpuva_ops_free(mgr, ops);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gpuva_sm_map_ops_create);

/**
 * drm_gpuva_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
 * unmap
 * @mgr: the &drm_gpuva_manager representing the GPU VA space
 * @req_addr: the start address of the range to unmap
 * @req_range: the range of the mappings to unmap
 *
 * This function creates a list of operations to perform unmapping and, if
 * required, splitting of the mappings overlapping the unmap range.
 *
 * The list can be iterated with &drm_gpuva_for_each_op and must be processed
 * in the given order. It can contain unmap and remap operations, depending on
 * whether there are actual overlapping mappings to split.
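 *
 * A typical usage pattern, with error handling and the actual page table
 * updates omitted and drv_unmap_op() standing in as placeholder driver code,
 * could be::
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *
 *	ops = drm_gpuva_sm_unmap_ops_create(mgr, req_addr, req_range);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops)
 *		drv_unmap_op(op);
 *
 *	drm_gpuva_ops_free(mgr, ops);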
 *
 * There can be an arbitrary amount of unmap operations and a maximum of two
 * remap operations.
 *
 * Note that before calling this function again with another range to unmap it
 * is necessary to update the &drm_gpuva_manager's view of the GPU VA space.
 * The previously obtained operations must be processed or abandoned. To update
 * the &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
 * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
 * used.
 *
 * After the caller finished processing the returned &drm_gpuva_ops, they must
 * be freed with drm_gpuva_ops_free().
 *
 * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
 */
struct drm_gpuva_ops *
drm_gpuva_sm_unmap_ops_create(struct drm_gpuva_manager *mgr,
			      u64 req_addr, u64 req_range)
{
	struct drm_gpuva_ops *ops;
	struct {
		struct drm_gpuva_manager *mgr;
		struct drm_gpuva_ops *ops;
	} args;
	int ret;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (unlikely(!ops))
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->list);

	args.mgr = mgr;
	args.ops = ops;

	ret = __drm_gpuva_sm_unmap(mgr, &gpuva_list_ops, &args,
				   req_addr, req_range);
	if (ret)
		goto err_free_ops;

	return ops;

err_free_ops:
	drm_gpuva_ops_free(mgr, ops);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gpuva_sm_unmap_ops_create);

/**
 * drm_gpuva_prefetch_ops_create() - creates the &drm_gpuva_ops to prefetch
 * @mgr: the &drm_gpuva_manager representing the GPU VA space
 * @addr: the start address of the range to prefetch
 * @range: the range of the mappings to prefetch
 *
 * This function creates a list of operations to perform prefetching.
 *
 * The list can be iterated with &drm_gpuva_for_each_op and must be processed
 * in the given order. It can contain prefetch operations.
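 *
 * One operation of type %DRM_GPUVA_OP_PREFETCH is created for each &drm_gpuva
 * overlapping the given range; the affected mapping can be obtained from the
 * operation's prefetch.va field.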
 *
 * There can be an arbitrary amount of prefetch operations.
 *
 * After the caller finished processing the returned &drm_gpuva_ops, they must
 * be freed with drm_gpuva_ops_free().
 *
 * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
 */
struct drm_gpuva_ops *
drm_gpuva_prefetch_ops_create(struct drm_gpuva_manager *mgr,
			      u64 addr, u64 range)
{
	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;
	struct drm_gpuva *va;
	u64 end = addr + range;
	int ret;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->list);

	drm_gpuva_for_each_va_range(va, mgr, addr, end) {
		op = gpuva_op_alloc(mgr);
		if (!op) {
			ret = -ENOMEM;
			goto err_free_ops;
		}

		op->op = DRM_GPUVA_OP_PREFETCH;
		op->prefetch.va = va;
		list_add_tail(&op->entry, &ops->list);
	}

	return ops;

err_free_ops:
	drm_gpuva_ops_free(mgr, ops);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gpuva_prefetch_ops_create);

/**
 * drm_gpuva_gem_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM
 * @mgr: the &drm_gpuva_manager representing the GPU VA space
 * @obj: the &drm_gem_object to unmap
 *
 * This function creates a list of operations to perform unmapping for every
 * GPUVA attached to a GEM.
 *
 * The list can be iterated with &drm_gpuva_for_each_op and consists of an
 * arbitrary amount of unmap operations.
 *
 * After the caller finished processing the returned &drm_gpuva_ops, they must
 * be freed with drm_gpuva_ops_free().
 *
 * It is the caller's responsibility to protect the GEM's GPUVA list against
 * concurrent access using the GEM's dma_resv lock.
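 *
 * A sketch of the expected calling context, assuming the GEM's dma_resv lock
 * serializes the GPUVA list and with error handling omitted, could be::
 *
 *	dma_resv_lock(obj->resv, NULL);
 *	ops = drm_gpuva_gem_unmap_ops_create(mgr, obj);
 *	dma_resv_unlock(obj->resv);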
 *
 * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
 */
struct drm_gpuva_ops *
drm_gpuva_gem_unmap_ops_create(struct drm_gpuva_manager *mgr,
			       struct drm_gem_object *obj)
{
	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;
	struct drm_gpuva *va;
	int ret;

	drm_gem_gpuva_assert_lock_held(obj);

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->list);

	drm_gem_for_each_gpuva(va, obj) {
		op = gpuva_op_alloc(mgr);
		if (!op) {
			ret = -ENOMEM;
			goto err_free_ops;
		}

		op->op = DRM_GPUVA_OP_UNMAP;
		op->unmap.va = va;
		list_add_tail(&op->entry, &ops->list);
	}

	return ops;

err_free_ops:
	drm_gpuva_ops_free(mgr, ops);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gpuva_gem_unmap_ops_create);

/**
 * drm_gpuva_ops_free() - free the given &drm_gpuva_ops
 * @mgr: the &drm_gpuva_manager the ops were created for
 * @ops: the &drm_gpuva_ops to free
 *
 * Frees the given &drm_gpuva_ops structure including all the ops associated
 * with it.
 */
void
drm_gpuva_ops_free(struct drm_gpuva_manager *mgr,
		   struct drm_gpuva_ops *ops)
{
	struct drm_gpuva_op *op, *next;

	drm_gpuva_for_each_op_safe(op, next, ops) {
		list_del(&op->entry);

		if (op->op == DRM_GPUVA_OP_REMAP) {
			kfree(op->remap.prev);
			kfree(op->remap.next);
			kfree(op->remap.unmap);
		}

		gpuva_op_free(mgr, op);
	}

	kfree(ops);
}
EXPORT_SYMBOL_GPL(drm_gpuva_ops_free);