// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     Danilo Krummrich <dakr@redhat.com>
 *
 */

#include <drm/drm_gpuva_mgr.h>

#include <linux/interval_tree_generic.h>
#include <linux/mm.h>

/**
 * DOC: Overview
 *
 * The DRM GPU VA Manager, represented by struct drm_gpuva_manager, keeps track
 * of a GPU's virtual address (VA) space and manages the corresponding virtual
 * mappings represented by &drm_gpuva objects. It also keeps track of the
 * mapping's backing &drm_gem_object buffers.
 *
 * &drm_gem_object buffers maintain a list of &drm_gpuva objects representing
 * all existent GPU VA mappings using this &drm_gem_object as backing buffer.
 *
 * GPU VAs can be flagged as sparse, such that drivers may use GPU VAs to also
 * keep track of sparse PTEs in order to support Vulkan 'Sparse Resources'.
 *
 * The GPU VA manager internally uses a rb-tree to manage the
 * &drm_gpuva mappings within a GPU's virtual address space.
 *
 * The &drm_gpuva_manager contains a special &drm_gpuva representing the
 * portion of VA space reserved by the kernel. This node is initialized together
 * with the GPU VA manager instance and removed when the GPU VA manager is
 * destroyed.
 *
 * In a typical application drivers would embed struct drm_gpuva_manager and
 * struct drm_gpuva within their own driver specific structures; hence, neither
 * the manager itself nor the &drm_gpuva entries require memory allocations of
 * their own.
 *
 * The data structures needed to store &drm_gpuvas within the &drm_gpuva_manager
 * are contained within struct drm_gpuva already. Hence, for inserting
 * &drm_gpuva entries from within dma-fence signalling critical sections it is
 * enough to pre-allocate the &drm_gpuva structures.
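 *
 * A minimal sketch of such an embedding; the driver structure names are
 * hypothetical::
 *
 *	struct driver_gpu_vm {
 *		struct drm_gpuva_manager mgr;
 *		// driver specific VM state, e.g. the page table root
 *	};
 *
 *	struct driver_gpu_mapping {
 *		struct drm_gpuva va;
 *		// driver specific per-mapping state
 *	};
 *
 *	// pre-allocated outside of the dma-fence signalling critical path
 *	struct driver_gpu_mapping *m = kzalloc(sizeof(*m), GFP_KERNEL);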
 */

/**
 * DOC: Split and Merge
 *
 * Besides its capability to manage and represent a GPU VA space, the
 * &drm_gpuva_manager also provides functions to let the &drm_gpuva_manager
 * calculate a sequence of operations to satisfy a given map or unmap request.
 *
 * Therefore the DRM GPU VA manager provides an algorithm implementing splitting
 * and merging of existent GPU VA mappings with the ones that are requested to
 * be mapped or unmapped. This feature is required by the Vulkan API to
 * implement Vulkan 'Sparse Memory Bindings' - driver UAPIs often refer to this
 * as VM BIND.
 *
 * Drivers can call drm_gpuva_sm_map() to receive a sequence of callbacks
 * containing map, unmap and remap operations for a given newly requested
 * mapping. The sequence of callbacks represents the set of operations to
 * execute in order to integrate the new mapping cleanly into the current state
 * of the GPU VA space.
 *
 * Depending on how the new GPU VA mapping intersects with the existent mappings
 * of the GPU VA space the &drm_gpuva_fn_ops callbacks contain an arbitrary
 * amount of unmap operations, a maximum of two remap operations and a single
 * map operation. The caller might receive no callback at all if no operation is
 * required, e.g. if the requested mapping already exists in the exact same way.
 *
 * The single map operation represents the original map operation requested by
 * the caller.
 *
 * &drm_gpuva_op_unmap contains a 'keep' field, which indicates whether the
 * &drm_gpuva to unmap is physically contiguous with the original mapping
 * request. Optionally, if 'keep' is set, drivers may keep the actual page table
 * entries for this &drm_gpuva, adding only the missing page table entries and
 * updating the &drm_gpuva_manager's view of things accordingly.
 *
 * Drivers may do the same optimization, namely delta page table updates, also
 * for remap operations. This is possible since &drm_gpuva_op_remap consists of
 * one unmap operation and one or two map operations, such that drivers can
 * derive the page table update delta accordingly.
 *
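 * For instance, a driver could honor the 'keep' flag in its unmap step roughly
 * as sketched below; driver_vm_unmap_ptes() is a hypothetical driver
 * internal::
 *
 *	static int driver_gpuva_unmap(struct drm_gpuva_op *op, void *ctx)
 *	{
 *		// If 'keep' is set, the PTEs are reused by the new mapping;
 *		// only the manager's view needs to be updated.
 *		if (!op->unmap.keep)
 *			driver_vm_unmap_ptes(op->unmap.va);
 *
 *		drm_gpuva_unlink(op->unmap.va);
 *		drm_gpuva_unmap(&op->unmap);
 *
 *		return 0;
 *	}
 *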
 * Note that there can't be more than two existent mappings to split up, one at
 * the beginning and one at the end of the new mapping, hence there is a
 * maximum of two remap operations.
 *
 * Analogous to drm_gpuva_sm_map(), drm_gpuva_sm_unmap() uses &drm_gpuva_fn_ops
 * to call back into the driver in order to unmap a range of GPU VA space. The
 * logic behind this function is much simpler though: For all existent mappings
 * enclosed by the given range unmap operations are created. For mappings which
 * are only partially located within the given range, remap operations are
 * created such that those mappings are split up and re-mapped partially.
 *
 * As an alternative to drm_gpuva_sm_map() and drm_gpuva_sm_unmap(),
 * drm_gpuva_sm_map_ops_create() and drm_gpuva_sm_unmap_ops_create() can be used
 * to directly obtain an instance of struct drm_gpuva_ops containing a list of
 * &drm_gpuva_op, which can be iterated with drm_gpuva_for_each_op(). This list
 * contains the &drm_gpuva_ops analogous to the callbacks one would receive when
 * calling drm_gpuva_sm_map() or drm_gpuva_sm_unmap(). While this way requires
 * more memory (to allocate the &drm_gpuva_ops), it provides drivers a way to
 * iterate the &drm_gpuva_op multiple times, e.g. once in a context where memory
 * allocations are possible (e.g. to allocate GPU page tables) and once in the
 * dma-fence signalling critical path.
 *
 * To update the &drm_gpuva_manager's view of the GPU VA space
 * drm_gpuva_insert() and drm_gpuva_remove() may be used. These functions can
 * safely be used from &drm_gpuva_fn_ops callbacks originating from
 * drm_gpuva_sm_map() or drm_gpuva_sm_unmap(). However, it might be more
 * convenient to use the provided helper functions drm_gpuva_map(),
 * drm_gpuva_remap() and drm_gpuva_unmap() instead.
 *
 * The following diagram depicts the basic relationships of existent GPU VA
 * mappings, a newly requested mapping and the resulting mappings as implemented
 * by drm_gpuva_sm_map() - it doesn't cover any arbitrary combinations of these.
 *
 * 1) Requested mapping is identical. Replace it, but indicate the backing PTEs
 *    could be kept.
 *
 *    ::
 *
 *	     0     a     1
 *	old: |-----------| (bo_offset=n)
 *
 *	     0     a     1
 *	req: |-----------| (bo_offset=n)
 *
 *	     0     a     1
 *	new: |-----------| (bo_offset=n)
 *
 *
 * 2) Requested mapping is identical, except for the BO offset, hence replace
 *    the mapping.
 *
 *    ::
 *
 *	     0     a     1
 *	old: |-----------| (bo_offset=n)
 *
 *	     0     a     1
 *	req: |-----------| (bo_offset=m)
 *
 *	     0     a     1
 *	new: |-----------| (bo_offset=m)
 *
 *
 * 3) Requested mapping is identical, except for the backing BO, hence replace
 *    the mapping.
 *
 *    ::
 *
 *	     0     a     1
 *	old: |-----------| (bo_offset=n)
 *
 *	     0     b     1
 *	req: |-----------| (bo_offset=n)
 *
 *	     0     b     1
 *	new: |-----------| (bo_offset=n)
 *
 *
 * 4) Existent mapping is a left aligned subset of the requested one, hence
 *    replace the existent one.
 *
 *    ::
 *
 *	     0  a  1
 *	old: |-----|       (bo_offset=n)
 *
 *	     0     a     2
 *	req: |-----------| (bo_offset=n)
 *
 *	     0     a     2
 *	new: |-----------| (bo_offset=n)
 *
 *    .. note::
 *       We expect to see the same result for a request with a different BO
 *       and/or non-contiguous BO offset.
 *
 *
 * 5) Requested mapping's range is a left aligned subset of the existent one,
 *    but backed by a different BO. Hence, map the requested mapping and split
 *    the existent one adjusting its BO offset.
 *
 *    ::
 *
 *	     0     a     2
 *	old: |-----------| (bo_offset=n)
 *
 *	     0  b  1
 *	req: |-----|       (bo_offset=n)
 *
 *	     0  b  1  a' 2
 *	new: |-----|-----| (b.bo_offset=n, a'.bo_offset=n+1)
 *
 *    .. note::
 *       We expect to see the same result for a request with a different BO
 *       and/or non-contiguous BO offset.
 *
 *
 * 6) Existent mapping is a superset of the requested mapping. Split it up, but
 *    indicate that the backing PTEs could be kept.
 *
 *    ::
 *
 *	     0     a     2
 *	old: |-----------| (bo_offset=n)
 *
 *	     0  a  1
 *	req: |-----|       (bo_offset=n)
 *
 *	     0  a  1  a' 2
 *	new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1)
 *
 *
 * 7) Requested mapping's range is a right aligned subset of the existent one,
 *    but backed by a different BO. Hence, map the requested mapping and split
 *    the existent one, without adjusting the BO offset.
 *
 *    ::
 *
 *	     0     a     2
 *	old: |-----------| (bo_offset=n)
 *
 *	           1  b  2
 *	req:       |-----| (bo_offset=m)
 *
 *	     0  a  1  b  2
 *	new: |-----|-----| (a.bo_offset=n,b.bo_offset=m)
 *
 *
 * 8) Existent mapping is a superset of the requested mapping. Split it up, but
 *    indicate that the backing PTEs could be kept.
 *
 *    ::
 *
 *	     0     a     2
 *	old: |-----------| (bo_offset=n)
 *
 *	           1  a  2
 *	req:       |-----| (bo_offset=n+1)
 *
 *	     0  a' 1  a  2
 *	new: |-----|-----| (a'.bo_offset=n, a.bo_offset=n+1)
 *
 *
 * 9) Existent mapping is overlapped at the end by the requested mapping backed
 *    by a different BO. Hence, map the requested mapping and split up the
 *    existent one, without adjusting the BO offset.
 *
 *    ::
 *
 *	     0     a     2
 *	old: |-----------|       (bo_offset=n)
 *
 *	           1     b     3
 *	req:       |-----------| (bo_offset=m)
 *
 *	     0  a  1     b     3
 *	new: |-----|-----------| (a.bo_offset=n,b.bo_offset=m)
 *
 *
 * 10) Existent mapping is overlapped by the requested mapping, both having the
 *     same backing BO with a contiguous offset. Indicate the backing PTEs of
 *     the old mapping could be kept.
 *
 *     ::
 *
 *	      0     a     2
 *	 old: |-----------|       (bo_offset=n)
 *
 *	            1     a     3
 *	 req:       |-----------| (bo_offset=n+1)
 *
 *	      0  a' 1     a     3
 *	 new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1)
 *
 *
 * 11) Requested mapping's range is a centered subset of the existent one
 *     having a different backing BO. Hence, map the requested mapping and split
 *     up the existent one in two mappings, adjusting the BO offset of the right
 *     one accordingly.
 *
 *     ::
 *
 *	      0        a        3
 *	 old: |-----------------| (bo_offset=n)
 *
 *	            1  b  2
 *	 req:       |-----|       (bo_offset=m)
 *
 *	      0  a  1  b  2  a' 3
 *	 new: |-----|-----|-----| (a.bo_offset=n,b.bo_offset=m,a'.bo_offset=n+2)
 *
 *
 * 12) Requested mapping is a contiguous subset of the existent one. Split it
 *     up, but indicate that the backing PTEs could be kept.
 *
 *     ::
 *
 *	      0        a        3
 *	 old: |-----------------| (bo_offset=n)
 *
 *	            1  a  2
 *	 req:       |-----|       (bo_offset=n+1)
 *
 *	      0  a' 1  a  2 a'' 3
 *	 new: |-----|-----|-----| (a'.bo_offset=n, a.bo_offset=n+1, a''.bo_offset=n+2)
 *
 *
 * 13) Existent mapping is a right aligned subset of the requested one, hence
 *     replace the existent one.
 *
 *     ::
 *
 *	            1  a  2
 *	 old:       |-----| (bo_offset=n+1)
 *
 *	      0     a     2
 *	 req: |-----------| (bo_offset=n)
 *
 *	      0     a     2
 *	 new: |-----------| (bo_offset=n)
 *
 *     .. note::
 *        We expect to see the same result for a request with a different BO
 *        and/or non-contiguous BO offset.
 *
 *
 * 14) Existent mapping is a centered subset of the requested one, hence
 *     replace the existent one.
 *
 *     ::
 *
 *	            1  a  2
 *	 old:       |-----| (bo_offset=n+1)
 *
 *	      0        a       3
 *	 req: |----------------| (bo_offset=n)
 *
 *	      0        a       3
 *	 new: |----------------| (bo_offset=n)
 *
 *     .. note::
 *        We expect to see the same result for a request with a different BO
 *        and/or non-contiguous BO offset.
 *
 *
 * 15) Existent mapping is overlapped at the beginning by the requested mapping
 *     backed by a different BO. Hence, map the requested mapping and split up
 *     the existent one, adjusting its BO offset accordingly.
 *
 *     ::
 *
 *	            1     a     3
 *	 old:       |-----------| (bo_offset=n)
 *
 *	      0     b     2
 *	 req: |-----------|       (bo_offset=m)
 *
 *	      0     b     2  a' 3
 *	 new: |-----------|-----| (b.bo_offset=m,a'.bo_offset=n+2)
 */

/**
 * DOC: Locking
 *
 * Generally, the GPU VA manager does not take care of locking itself; it is
 * the driver's responsibility to take care of locking. Drivers might want to
 * protect the following operations: inserting, removing and iterating
 * &drm_gpuva objects as well as generating all kinds of operations, such as
 * split / merge or prefetch.
 *
 * The GPU VA manager also does not take care of the locking of the backing
 * &drm_gem_object buffers' GPU VA lists by itself; drivers are responsible for
 * enforcing mutual exclusion using either the GEM's dma_resv lock or
 * alternatively a driver specific external lock. For the latter see also
 * drm_gem_gpuva_set_lock().
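 *
 * With a driver specific external lock, a minimal sketch could look like the
 * following, assuming a driver mutex guarding the GEM's GPUVA list (the
 * surrounding structures are hypothetical)::
 *
 *	drm_gem_gpuva_set_lock(obj, &drv->gpuva_lock);
 *
 *	mutex_lock(&drv->gpuva_lock);
 *	drm_gpuva_link(va);
 *	mutex_unlock(&drv->gpuva_lock);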
 *
 * However, the GPU VA manager contains lockdep checks to ensure callers of its
 * API hold the corresponding lock whenever the &drm_gem_object's GPU VA list is
 * accessed by functions such as drm_gpuva_link() or drm_gpuva_unlink().
 */

/**
 * DOC: Examples
 *
 * This section gives a few examples of how to let the DRM GPUVA Manager
 * generate &drm_gpuva_op in order to satisfy a given map or unmap request and
 * how to make use of them.
 *
 * The below code is strictly limited to illustrating the generic usage
 * pattern. To maintain simplicity, it doesn't make use of any abstractions for
 * common code, different (asynchronous) stages with fence signalling critical
 * paths, any other helpers or error handling in terms of freeing memory and
 * dropping previously taken locks.
 *
 * 1) Obtain a list of &drm_gpuva_op to create a new mapping::
 *
 *	// Allocates a new &drm_gpuva.
 *	struct drm_gpuva * driver_gpuva_alloc(void);
 *
 *	// Typically drivers would embed the &drm_gpuva_manager and &drm_gpuva
 *	// structure in individual driver structures and lock the dma-resv with
 *	// drm_exec or similar helpers.
 *	int driver_mapping_create(struct drm_gpuva_manager *mgr,
 *				  u64 addr, u64 range,
 *				  struct drm_gem_object *obj, u64 offset)
 *	{
 *		struct drm_gpuva_ops *ops;
 *		struct drm_gpuva_op *op;
 *
 *		driver_lock_va_space();
 *		ops = drm_gpuva_sm_map_ops_create(mgr, addr, range,
 *						  obj, offset);
 *		if (IS_ERR(ops))
 *			return PTR_ERR(ops);
 *
 *		drm_gpuva_for_each_op(op, ops) {
 *			struct drm_gpuva *va;
 *
 *			switch (op->op) {
 *			case DRM_GPUVA_OP_MAP:
 *				va = driver_gpuva_alloc();
 *				if (!va)
 *					; // unwind previous VA space updates,
 *					  // free memory and unlock
 *
 *				driver_vm_map();
 *				drm_gpuva_map(mgr, va, &op->map);
 *				drm_gpuva_link(va);
 *
 *				break;
 *			case DRM_GPUVA_OP_REMAP: {
 *				struct drm_gpuva *prev = NULL, *next = NULL;
 *
 *				va = op->remap.unmap->va;
 *
 *				if (op->remap.prev) {
 *					prev = driver_gpuva_alloc();
 *					if (!prev)
 *						; // unwind previous VA space
 *						  // updates, free memory and
 *						  // unlock
 *				}
 *
 *				if (op->remap.next) {
 *					next = driver_gpuva_alloc();
 *					if (!next)
 *						; // unwind previous VA space
 *						  // updates, free memory and
 *						  // unlock
 *				}
 *
 *				driver_vm_remap();
 *				drm_gpuva_remap(prev, next, &op->remap);
 *
 *				drm_gpuva_unlink(va);
 *				if (prev)
 *					drm_gpuva_link(prev);
 *				if (next)
 *					drm_gpuva_link(next);
 *
 *				break;
 *			}
 *			case DRM_GPUVA_OP_UNMAP:
 *				va = op->unmap.va;
 *
 *				driver_vm_unmap();
 *				drm_gpuva_unlink(va);
 *				drm_gpuva_unmap(&op->unmap);
 *
 *				break;
 *			default:
 *				break;
 *			}
 *		}
 *		driver_unlock_va_space();
 *
 *		return 0;
 *	}
 *
 * 2) Receive a callback for each &drm_gpuva_op to create a new mapping::
 *
 *	struct driver_context {
 *		struct drm_gpuva_manager *mgr;
 *		struct drm_gpuva *new_va;
 *		struct drm_gpuva *prev_va;
 *		struct drm_gpuva *next_va;
 *	};
 *
 *	// ops to pass to drm_gpuva_manager_init()
 *	static const struct drm_gpuva_fn_ops driver_gpuva_ops = {
 *		.sm_step_map = driver_gpuva_map,
 *		.sm_step_remap = driver_gpuva_remap,
 *		.sm_step_unmap = driver_gpuva_unmap,
 *	};
 *
 *	// Typically drivers would embed the &drm_gpuva_manager and &drm_gpuva
 *	// structure in individual driver structures and lock the dma-resv with
 *	// drm_exec or similar helpers.
 *	int driver_mapping_create(struct drm_gpuva_manager *mgr,
 *				  u64 addr, u64 range,
 *				  struct drm_gem_object *obj, u64 offset)
 *	{
 *		struct driver_context ctx;
 *		struct drm_gpuva_ops *ops;
 *		struct drm_gpuva_op *op;
 *		int ret = 0;
 *
 *		ctx.mgr = mgr;
 *
 *		ctx.new_va = kzalloc(sizeof(*ctx.new_va), GFP_KERNEL);
 *		ctx.prev_va = kzalloc(sizeof(*ctx.prev_va), GFP_KERNEL);
 *		ctx.next_va = kzalloc(sizeof(*ctx.next_va), GFP_KERNEL);
 *		if (!ctx.new_va || !ctx.prev_va || !ctx.next_va) {
 *			ret = -ENOMEM;
 *			goto out;
 *		}
 *
 *		driver_lock_va_space();
 *		ret = drm_gpuva_sm_map(mgr, &ctx, addr, range, obj, offset);
 *		driver_unlock_va_space();
 *
 *	out:
 *		kfree(ctx.new_va);
 *		kfree(ctx.prev_va);
 *		kfree(ctx.next_va);
 *		return ret;
 *	}
 *
 *	int driver_gpuva_map(struct drm_gpuva_op *op, void *__ctx)
 *	{
 *		struct driver_context *ctx = __ctx;
 *
 *		drm_gpuva_map(ctx->mgr, ctx->new_va, &op->map);
 *
 *		drm_gpuva_link(ctx->new_va);
 *
 *		// prevent the new GPUVA from being freed in
 *		// driver_mapping_create()
 *		ctx->new_va = NULL;
 *
 *		return 0;
 *	}
 *
 *	int driver_gpuva_remap(struct drm_gpuva_op *op, void *__ctx)
 *	{
 *		struct driver_context *ctx = __ctx;
 *
 *		drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap);
 *
 *		drm_gpuva_unlink(op->remap.unmap->va);
 *		kfree(op->remap.unmap->va);
 *
 *		if (op->remap.prev) {
 *			drm_gpuva_link(ctx->prev_va);
 *			ctx->prev_va = NULL;
 *		}
 *
 *		if (op->remap.next) {
 *			drm_gpuva_link(ctx->next_va);
 *			ctx->next_va = NULL;
 *		}
 *
 *		return 0;
 *	}
 *
 *	int driver_gpuva_unmap(struct drm_gpuva_op *op, void *__ctx)
 *	{
 *		drm_gpuva_unlink(op->unmap.va);
 *		drm_gpuva_unmap(&op->unmap);
 *		kfree(op->unmap.va);
 *
 *		return 0;
 *	}
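 *
 * 3) Obtain a list of &drm_gpuva_op in order to unmap a range of GPU VA
 *    space; a minimal sketch, following the same simplified conventions as
 *    the examples above::
 *
 *	int driver_mapping_unmap(struct drm_gpuva_manager *mgr,
 *				 u64 addr, u64 range)
 *	{
 *		struct drm_gpuva_ops *ops;
 *		struct drm_gpuva_op *op;
 *
 *		driver_lock_va_space();
 *		ops = drm_gpuva_sm_unmap_ops_create(mgr, addr, range);
 *		if (IS_ERR(ops))
 *			return PTR_ERR(ops);
 *
 *		drm_gpuva_for_each_op(op, ops) {
 *			struct drm_gpuva *va;
 *
 *			switch (op->op) {
 *			case DRM_GPUVA_OP_REMAP:
 *				// a mapping is only partially enclosed by the
 *				// given range; split it up as in example 1)
 *				break;
 *			case DRM_GPUVA_OP_UNMAP:
 *				va = op->unmap.va;
 *
 *				driver_vm_unmap();
 *				drm_gpuva_unlink(va);
 *				drm_gpuva_unmap(&op->unmap);
 *
 *				break;
 *			default:
 *				break;
 *			}
 *		}
 *		driver_unlock_va_space();
 *
 *		return 0;
 *	}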
 */

#define to_drm_gpuva(__node)	container_of((__node), struct drm_gpuva, rb.node)

#define GPUVA_START(node) ((node)->va.addr)
#define GPUVA_LAST(node) ((node)->va.addr + (node)->va.range - 1)

/* We do not actually use drm_gpuva_it_next(); tell the compiler not to
 * complain about this.
 */
INTERVAL_TREE_DEFINE(struct drm_gpuva, rb.node, u64, rb.__subtree_last,
		     GPUVA_START, GPUVA_LAST, static __maybe_unused,
		     drm_gpuva_it)

static int __drm_gpuva_insert(struct drm_gpuva_manager *mgr,
			      struct drm_gpuva *va);
static void __drm_gpuva_remove(struct drm_gpuva *va);

static bool
drm_gpuva_check_overflow(u64 addr, u64 range)
{
	u64 end;

	return WARN(check_add_overflow(addr, range, &end),
		    "GPUVA address limited to %zu bytes.\n", sizeof(end));
}

static bool
drm_gpuva_in_mm_range(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
{
	u64 end = addr + range;
	u64 mm_start = mgr->mm_start;
	u64 mm_end = mm_start + mgr->mm_range;

	return addr >= mm_start && end <= mm_end;
}

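/* Check whether the given range overlaps the kernel reserved node. */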
static bool
drm_gpuva_in_kernel_node(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
{
	u64 end = addr + range;
	u64 kstart = mgr->kernel_alloc_node.va.addr;
	u64 krange = mgr->kernel_alloc_node.va.range;
	u64 kend = kstart + krange;

	return krange && addr < kend && kstart < end;
}

static bool
drm_gpuva_range_valid(struct drm_gpuva_manager *mgr,
		      u64 addr, u64 range)
{
	return !drm_gpuva_check_overflow(addr, range) &&
	       drm_gpuva_in_mm_range(mgr, addr, range) &&
	       !drm_gpuva_in_kernel_node(mgr, addr, range);
}

/**
 * drm_gpuva_manager_init() - initialize a &drm_gpuva_manager
 * @mgr: pointer to the &drm_gpuva_manager to initialize
 * @name: the name of the GPU VA space
 * @start_offset: the start offset of the GPU VA space
 * @range: the size of the GPU VA space
 * @reserve_offset: the start of the kernel reserved GPU VA area
 * @reserve_range: the size of the kernel reserved GPU VA area
 * @ops: &drm_gpuva_fn_ops called on &drm_gpuva_sm_map / &drm_gpuva_sm_unmap
 *
 * The &drm_gpuva_manager must be initialized with this function before use.
 *
 * Note that @mgr must be cleared to 0 before calling this function. The given
 * &name is expected to be managed by the surrounding driver structures.
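 *
 * A minimal initialization sketch, reserving a single page for the kernel at
 * the start of a 48-bit VA space; the values are arbitrary, and
 * driver_gpuva_ops refers to the ops structure shown in the DOC: Examples
 * section::
 *
 *	drm_gpuva_manager_init(mgr, "example-vm",
 *			       0, 1ULL << 48,
 *			       0, 0x1000,
 *			       &driver_gpuva_ops);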
 */
void
drm_gpuva_manager_init(struct drm_gpuva_manager *mgr,
		       const char *name,
		       u64 start_offset, u64 range,
		       u64 reserve_offset, u64 reserve_range,
		       const struct drm_gpuva_fn_ops *ops)
{
	mgr->rb.tree = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&mgr->rb.list);

	drm_gpuva_check_overflow(start_offset, range);
	mgr->mm_start = start_offset;
	mgr->mm_range = range;

	mgr->name = name ? name : "unknown";
	mgr->ops = ops;

	memset(&mgr->kernel_alloc_node, 0, sizeof(struct drm_gpuva));

	if (reserve_range) {
		mgr->kernel_alloc_node.va.addr = reserve_offset;
		mgr->kernel_alloc_node.va.range = reserve_range;

		if (likely(!drm_gpuva_check_overflow(reserve_offset,
						     reserve_range)))
			__drm_gpuva_insert(mgr, &mgr->kernel_alloc_node);
	}
}
EXPORT_SYMBOL_GPL(drm_gpuva_manager_init);

/**
 * drm_gpuva_manager_destroy() - cleanup a &drm_gpuva_manager
 * @mgr: pointer to the &drm_gpuva_manager to clean up
 *
 * Note that it is a bug to call this function on a manager that still
 * holds GPU VA mappings.
 */
void
drm_gpuva_manager_destroy(struct drm_gpuva_manager *mgr)
{
	mgr->name = NULL;

	if (mgr->kernel_alloc_node.va.range)
		__drm_gpuva_remove(&mgr->kernel_alloc_node);

	WARN(!RB_EMPTY_ROOT(&mgr->rb.tree.rb_root),
	     "GPUVA tree is not empty, potentially leaking memory.");
}
EXPORT_SYMBOL_GPL(drm_gpuva_manager_destroy);

static int
__drm_gpuva_insert(struct drm_gpuva_manager *mgr,
		   struct drm_gpuva *va)
{
	struct rb_node *node;
	struct list_head *head;

	if (drm_gpuva_it_iter_first(&mgr->rb.tree,
				    GPUVA_START(va),
				    GPUVA_LAST(va)))
		return -EEXIST;

	va->mgr = mgr;

	drm_gpuva_it_insert(va, &mgr->rb.tree);

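	/* Keep the address-ordered list in sync with the interval tree:
	 * insert @va after its tree predecessor, or at the list head if
	 * there is none.
	 */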
	node = rb_prev(&va->rb.node);
	if (node)
		head = &(to_drm_gpuva(node))->rb.entry;
	else
		head = &mgr->rb.list;

	list_add(&va->rb.entry, head);

	return 0;
}

/**
 * drm_gpuva_insert() - insert a &drm_gpuva
 * @mgr: the &drm_gpuva_manager to insert the &drm_gpuva in
 * @va: the &drm_gpuva to insert
 *
 * Insert a &drm_gpuva with a given address and range into a
 * &drm_gpuva_manager.
 *
 * It is safe to use this function when iterating the GPU VA space with the
 * safe iterator variants, such as drm_gpuva_for_each_va_safe() and
 * drm_gpuva_for_each_va_range_safe().
 *
 * Returns: 0 on success, negative error code on failure.
 */
int
drm_gpuva_insert(struct drm_gpuva_manager *mgr,
		 struct drm_gpuva *va)
{
	u64 addr = va->va.addr;
	u64 range = va->va.range;

	if (unlikely(!drm_gpuva_range_valid(mgr, addr, range)))
		return -EINVAL;

	return __drm_gpuva_insert(mgr, va);
}
EXPORT_SYMBOL_GPL(drm_gpuva_insert);

static void
__drm_gpuva_remove(struct drm_gpuva *va)
{
	drm_gpuva_it_remove(va, &va->mgr->rb.tree);
	list_del_init(&va->rb.entry);
}

/**
 * drm_gpuva_remove() - remove a &drm_gpuva
 * @va: the &drm_gpuva to remove
 *
 * This removes the given &va from the underlying tree.
 *
 * It is safe to use this function when iterating the GPU VA space with the
 * safe iterator variants, such as drm_gpuva_for_each_va_safe() and
 * drm_gpuva_for_each_va_range_safe().
 */
void
drm_gpuva_remove(struct drm_gpuva *va)
{
	struct drm_gpuva_manager *mgr = va->mgr;

	if (unlikely(va == &mgr->kernel_alloc_node)) {
		WARN(1, "Can't destroy kernel reserved node.\n");
		return;
	}

	__drm_gpuva_remove(va);
}
EXPORT_SYMBOL_GPL(drm_gpuva_remove);

/**
 * drm_gpuva_link() - link a &drm_gpuva
 * @va: the &drm_gpuva to link
 *
 * This adds the given &va to the GPU VA list of the &drm_gem_object it is
 * associated with.
 *
 * This function expects the caller to protect the GEM's GPUVA list against
 * concurrent access using the GEM's dma_resv lock.
 */
void
drm_gpuva_link(struct drm_gpuva *va)
{
	struct drm_gem_object *obj = va->gem.obj;

	if (unlikely(!obj))
		return;

	drm_gem_gpuva_assert_lock_held(obj);

	list_add_tail(&va->gem.entry, &obj->gpuva.list);
}
EXPORT_SYMBOL_GPL(drm_gpuva_link);

/**
 * drm_gpuva_unlink() - unlink a &drm_gpuva
 * @va: the &drm_gpuva to unlink
 *
 * This removes the given &va from the GPU VA list of the &drm_gem_object it is
 * associated with.
 *
 * This function expects the caller to protect the GEM's GPUVA list against
 * concurrent access using the GEM's dma_resv lock.
 */
void
drm_gpuva_unlink(struct drm_gpuva *va)
{
	struct drm_gem_object *obj = va->gem.obj;

	if (unlikely(!obj))
		return;

	drm_gem_gpuva_assert_lock_held(obj);

	list_del_init(&va->gem.entry);
}
EXPORT_SYMBOL_GPL(drm_gpuva_unlink);

/**
 * drm_gpuva_find_first() - find the first &drm_gpuva in the given range
 * @mgr: the &drm_gpuva_manager to search in
 * @addr: the &drm_gpuva's address
 * @range: the &drm_gpuva's range
 *
 * Returns: the first &drm_gpuva within the given range
 */
struct drm_gpuva *
drm_gpuva_find_first(struct drm_gpuva_manager *mgr,
		     u64 addr, u64 range)
{
	u64 last = addr + range - 1;

	return drm_gpuva_it_iter_first(&mgr->rb.tree, addr, last);
}
EXPORT_SYMBOL_GPL(drm_gpuva_find_first);

/**
 * drm_gpuva_find() - find a &drm_gpuva
 * @mgr: the &drm_gpuva_manager to search in
 * @addr: the &drm_gpuva's address
 * @range: the &drm_gpuva's range
 *
 * Returns: the &drm_gpuva at a given &addr and with a given &range
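 *
 * Unlike drm_gpuva_find_first(), this helper requires an exact match of both
 * address and range; a brief usage sketch with arbitrary values::
 *
 *	struct drm_gpuva *va;
 *
 *	va = drm_gpuva_find(mgr, 0x1000, 0x2000);
 *	if (!va)
 *		; // no mapping with addr=0x1000 and range=0x2000 exists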
 */
struct drm_gpuva *
drm_gpuva_find(struct drm_gpuva_manager *mgr,
	       u64 addr, u64 range)
{
	struct drm_gpuva *va;

	va = drm_gpuva_find_first(mgr, addr, range);
	if (!va)
		goto out;

	if (va->va.addr != addr ||
	    va->va.range != range)
		goto out;

	return va;

out:
	return NULL;
}
EXPORT_SYMBOL_GPL(drm_gpuva_find);

/**
 * drm_gpuva_find_prev() - find the &drm_gpuva before the given address
 * @mgr: the &drm_gpuva_manager to search in
 * @start: the given GPU VA's start address
 *
 * Find the adjacent &drm_gpuva before the GPU VA with the given &start address.
 *
 * Note that if there is any free space between the GPU VA mappings, no mapping
 * is returned.
 *
 * Returns: a pointer to the found &drm_gpuva or NULL if none was found
 */
struct drm_gpuva *
drm_gpuva_find_prev(struct drm_gpuva_manager *mgr, u64 start)
{
	if (!drm_gpuva_range_valid(mgr, start - 1, 1))
		return NULL;

	return drm_gpuva_it_iter_first(&mgr->rb.tree, start - 1, start);
}
EXPORT_SYMBOL_GPL(drm_gpuva_find_prev);

/**
 * drm_gpuva_find_next() - find the &drm_gpuva after the given address
 * @mgr: the &drm_gpuva_manager to search in
 * @end: the given GPU VA's end address
 *
 * Find the adjacent &drm_gpuva after the GPU VA with the given &end address.
 *
 * Note that if there is any free space between the GPU VA mappings, no mapping
 * is returned.
 *
 * Returns: a pointer to the found &drm_gpuva or NULL if none was found
 */
struct drm_gpuva *
drm_gpuva_find_next(struct drm_gpuva_manager *mgr, u64 end)
{
	if (!drm_gpuva_range_valid(mgr, end, 1))
		return NULL;

	return drm_gpuva_it_iter_first(&mgr->rb.tree, end, end + 1);
}
EXPORT_SYMBOL_GPL(drm_gpuva_find_next);

945*e6303f32SDanilo Krummrich /**
946*e6303f32SDanilo Krummrich  * drm_gpuva_interval_empty() - indicate whether a given interval of the VA space
947*e6303f32SDanilo Krummrich  * is empty
948*e6303f32SDanilo Krummrich  * @mgr: the &drm_gpuva_manager to check the range for
949*e6303f32SDanilo Krummrich  * @addr: the start address of the range
950*e6303f32SDanilo Krummrich  * @range: the range of the interval
951*e6303f32SDanilo Krummrich  *
952*e6303f32SDanilo Krummrich  * Returns: true if the interval is empty, false otherwise
953*e6303f32SDanilo Krummrich  */
954*e6303f32SDanilo Krummrich bool
955*e6303f32SDanilo Krummrich drm_gpuva_interval_empty(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
956*e6303f32SDanilo Krummrich {
957*e6303f32SDanilo Krummrich 	return !drm_gpuva_find_first(mgr, addr, range);
958*e6303f32SDanilo Krummrich }
959*e6303f32SDanilo Krummrich EXPORT_SYMBOL_GPL(drm_gpuva_interval_empty);
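
/*
 * Usage sketch: rejecting a request for a fixed VA that is already (partially)
 * in use; the error code is an assumption.
 *
 *	if (!drm_gpuva_interval_empty(mgr, req_addr, req_range))
 *		return -EEXIST;
 */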
960*e6303f32SDanilo Krummrich 
961*e6303f32SDanilo Krummrich /**
962*e6303f32SDanilo Krummrich  * drm_gpuva_map() - helper to insert a &drm_gpuva according to a
963*e6303f32SDanilo Krummrich  * &drm_gpuva_op_map
964*e6303f32SDanilo Krummrich  * @mgr: the &drm_gpuva_manager
965*e6303f32SDanilo Krummrich  * @va: the &drm_gpuva to insert
966*e6303f32SDanilo Krummrich  * @op: the &drm_gpuva_op_map to initialize @va with
967*e6303f32SDanilo Krummrich  *
968*e6303f32SDanilo Krummrich  * Initializes the @va from the @op and inserts it into the given @mgr.
969*e6303f32SDanilo Krummrich  */
970*e6303f32SDanilo Krummrich void
971*e6303f32SDanilo Krummrich drm_gpuva_map(struct drm_gpuva_manager *mgr,
972*e6303f32SDanilo Krummrich 	      struct drm_gpuva *va,
973*e6303f32SDanilo Krummrich 	      struct drm_gpuva_op_map *op)
974*e6303f32SDanilo Krummrich {
975*e6303f32SDanilo Krummrich 	drm_gpuva_init_from_op(va, op);
976*e6303f32SDanilo Krummrich 	drm_gpuva_insert(mgr, va);
977*e6303f32SDanilo Krummrich }
978*e6303f32SDanilo Krummrich EXPORT_SYMBOL_GPL(drm_gpuva_map);
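
/*
 * Usage sketch: a &drm_gpuva_fn_ops.sm_step_map callback backing the map step
 * with a pre-allocated &drm_gpuva. The context structure is hypothetical.
 *
 *	struct driver_sm_ctx {
 *		struct drm_gpuva_manager *mgr;
 *		struct drm_gpuva *new_va, *prev_va, *next_va;
 *	};
 *
 *	static int driver_gpuva_map(struct drm_gpuva_op *op, void *priv)
 *	{
 *		struct driver_sm_ctx *ctx = priv;
 *
 *		drm_gpuva_map(ctx->mgr, ctx->new_va, &op->map);
 *		ctx->new_va = NULL;
 *
 *		return 0;
 *	}
 */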
979*e6303f32SDanilo Krummrich 
980*e6303f32SDanilo Krummrich /**
981*e6303f32SDanilo Krummrich  * drm_gpuva_remap() - helper to remap a &drm_gpuva according to a
982*e6303f32SDanilo Krummrich  * &drm_gpuva_op_remap
983*e6303f32SDanilo Krummrich  * @prev: the &drm_gpuva to remap when keeping the start of a mapping
984*e6303f32SDanilo Krummrich  * @next: the &drm_gpuva to remap when keeping the end of a mapping
985*e6303f32SDanilo Krummrich  * @op: the &drm_gpuva_op_remap to initialize @prev and @next with
986*e6303f32SDanilo Krummrich  *
987*e6303f32SDanilo Krummrich  * Removes the currently mapped &drm_gpuva and remaps it using @prev and/or
988*e6303f32SDanilo Krummrich  * @next.
989*e6303f32SDanilo Krummrich  */
990*e6303f32SDanilo Krummrich void
991*e6303f32SDanilo Krummrich drm_gpuva_remap(struct drm_gpuva *prev,
992*e6303f32SDanilo Krummrich 		struct drm_gpuva *next,
993*e6303f32SDanilo Krummrich 		struct drm_gpuva_op_remap *op)
994*e6303f32SDanilo Krummrich {
995*e6303f32SDanilo Krummrich 	struct drm_gpuva *curr = op->unmap->va;
996*e6303f32SDanilo Krummrich 	struct drm_gpuva_manager *mgr = curr->mgr;
997*e6303f32SDanilo Krummrich 
998*e6303f32SDanilo Krummrich 	drm_gpuva_remove(curr);
999*e6303f32SDanilo Krummrich 
1000*e6303f32SDanilo Krummrich 	if (op->prev) {
1001*e6303f32SDanilo Krummrich 		drm_gpuva_init_from_op(prev, op->prev);
1002*e6303f32SDanilo Krummrich 		drm_gpuva_insert(mgr, prev);
1003*e6303f32SDanilo Krummrich 	}
1004*e6303f32SDanilo Krummrich 
1005*e6303f32SDanilo Krummrich 	if (op->next) {
1006*e6303f32SDanilo Krummrich 		drm_gpuva_init_from_op(next, op->next);
1007*e6303f32SDanilo Krummrich 		drm_gpuva_insert(mgr, next);
1008*e6303f32SDanilo Krummrich 	}
1009*e6303f32SDanilo Krummrich }
1010*e6303f32SDanilo Krummrich EXPORT_SYMBOL_GPL(drm_gpuva_remap);
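
/*
 * Usage sketch: the matching &drm_gpuva_fn_ops.sm_step_remap callback, reusing
 * the hypothetical driver_sm_ctx from the example above. Only pre-allocations
 * actually consumed by drm_gpuva_remap() are cleared, so the driver can tell
 * which ones are still unused afterwards.
 *
 *	static int driver_gpuva_remap(struct drm_gpuva_op *op, void *priv)
 *	{
 *		struct driver_sm_ctx *ctx = priv;
 *
 *		drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap);
 *
 *		if (op->remap.prev)
 *			ctx->prev_va = NULL;
 *		if (op->remap.next)
 *			ctx->next_va = NULL;
 *
 *		return 0;
 *	}
 */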
1011*e6303f32SDanilo Krummrich 
1012*e6303f32SDanilo Krummrich /**
1013*e6303f32SDanilo Krummrich  * drm_gpuva_unmap() - helper to remove a &drm_gpuva according to a
1014*e6303f32SDanilo Krummrich  * &drm_gpuva_op_unmap
1015*e6303f32SDanilo Krummrich  * @op: the &drm_gpuva_op_unmap specifying the &drm_gpuva to remove
1016*e6303f32SDanilo Krummrich  *
1017*e6303f32SDanilo Krummrich  * Removes the &drm_gpuva associated with the &drm_gpuva_op_unmap.
1018*e6303f32SDanilo Krummrich  */
1019*e6303f32SDanilo Krummrich void
1020*e6303f32SDanilo Krummrich drm_gpuva_unmap(struct drm_gpuva_op_unmap *op)
1021*e6303f32SDanilo Krummrich {
1022*e6303f32SDanilo Krummrich 	drm_gpuva_remove(op->va);
1023*e6303f32SDanilo Krummrich }
1024*e6303f32SDanilo Krummrich EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
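
/*
 * Usage sketch: a &drm_gpuva_fn_ops.sm_step_unmap callback, assuming the
 * driver allocated its &drm_gpuva structures with kzalloc().
 *
 *	static int driver_gpuva_unmap(struct drm_gpuva_op *op, void *priv)
 *	{
 *		drm_gpuva_unmap(&op->unmap);
 *		kfree(op->unmap.va);
 *
 *		return 0;
 *	}
 */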
1025*e6303f32SDanilo Krummrich 
1026*e6303f32SDanilo Krummrich static int
1027*e6303f32SDanilo Krummrich op_map_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
1028*e6303f32SDanilo Krummrich 	  u64 addr, u64 range,
1029*e6303f32SDanilo Krummrich 	  struct drm_gem_object *obj, u64 offset)
1030*e6303f32SDanilo Krummrich {
1031*e6303f32SDanilo Krummrich 	struct drm_gpuva_op op = {};
1032*e6303f32SDanilo Krummrich 
1033*e6303f32SDanilo Krummrich 	op.op = DRM_GPUVA_OP_MAP;
1034*e6303f32SDanilo Krummrich 	op.map.va.addr = addr;
1035*e6303f32SDanilo Krummrich 	op.map.va.range = range;
1036*e6303f32SDanilo Krummrich 	op.map.gem.obj = obj;
1037*e6303f32SDanilo Krummrich 	op.map.gem.offset = offset;
1038*e6303f32SDanilo Krummrich 
1039*e6303f32SDanilo Krummrich 	return fn->sm_step_map(&op, priv);
1040*e6303f32SDanilo Krummrich }
1041*e6303f32SDanilo Krummrich 
1042*e6303f32SDanilo Krummrich static int
1043*e6303f32SDanilo Krummrich op_remap_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
1044*e6303f32SDanilo Krummrich 	    struct drm_gpuva_op_map *prev,
1045*e6303f32SDanilo Krummrich 	    struct drm_gpuva_op_map *next,
1046*e6303f32SDanilo Krummrich 	    struct drm_gpuva_op_unmap *unmap)
1047*e6303f32SDanilo Krummrich {
1048*e6303f32SDanilo Krummrich 	struct drm_gpuva_op op = {};
1049*e6303f32SDanilo Krummrich 	struct drm_gpuva_op_remap *r;
1050*e6303f32SDanilo Krummrich 
1051*e6303f32SDanilo Krummrich 	op.op = DRM_GPUVA_OP_REMAP;
1052*e6303f32SDanilo Krummrich 	r = &op.remap;
1053*e6303f32SDanilo Krummrich 	r->prev = prev;
1054*e6303f32SDanilo Krummrich 	r->next = next;
1055*e6303f32SDanilo Krummrich 	r->unmap = unmap;
1056*e6303f32SDanilo Krummrich 
1057*e6303f32SDanilo Krummrich 	return fn->sm_step_remap(&op, priv);
1058*e6303f32SDanilo Krummrich }
1059*e6303f32SDanilo Krummrich 
1060*e6303f32SDanilo Krummrich static int
1061*e6303f32SDanilo Krummrich op_unmap_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
1062*e6303f32SDanilo Krummrich 	    struct drm_gpuva *va, bool merge)
1063*e6303f32SDanilo Krummrich {
1064*e6303f32SDanilo Krummrich 	struct drm_gpuva_op op = {};
1065*e6303f32SDanilo Krummrich 
1066*e6303f32SDanilo Krummrich 	op.op = DRM_GPUVA_OP_UNMAP;
1067*e6303f32SDanilo Krummrich 	op.unmap.va = va;
1068*e6303f32SDanilo Krummrich 	op.unmap.keep = merge;
1069*e6303f32SDanilo Krummrich 
1070*e6303f32SDanilo Krummrich 	return fn->sm_step_unmap(&op, priv);
1071*e6303f32SDanilo Krummrich }
1072*e6303f32SDanilo Krummrich 
1073*e6303f32SDanilo Krummrich static int
1074*e6303f32SDanilo Krummrich __drm_gpuva_sm_map(struct drm_gpuva_manager *mgr,
1075*e6303f32SDanilo Krummrich 		   const struct drm_gpuva_fn_ops *ops, void *priv,
1076*e6303f32SDanilo Krummrich 		   u64 req_addr, u64 req_range,
1077*e6303f32SDanilo Krummrich 		   struct drm_gem_object *req_obj, u64 req_offset)
1078*e6303f32SDanilo Krummrich {
1079*e6303f32SDanilo Krummrich 	struct drm_gpuva *va, *next, *prev = NULL;
1080*e6303f32SDanilo Krummrich 	u64 req_end = req_addr + req_range;
1081*e6303f32SDanilo Krummrich 	int ret;
1082*e6303f32SDanilo Krummrich 
1083*e6303f32SDanilo Krummrich 	if (unlikely(!drm_gpuva_range_valid(mgr, req_addr, req_range)))
1084*e6303f32SDanilo Krummrich 		return -EINVAL;
1085*e6303f32SDanilo Krummrich 
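	/*
	 * Walk all mappings overlapping the requested range. Depending on how
	 * an existent mapping aligns with the request, it is either unmapped
	 * entirely or split via a remap step keeping its head and/or tail.
	 */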
1086*e6303f32SDanilo Krummrich 	drm_gpuva_for_each_va_range_safe(va, next, mgr, req_addr, req_end) {
1087*e6303f32SDanilo Krummrich 		struct drm_gem_object *obj = va->gem.obj;
1088*e6303f32SDanilo Krummrich 		u64 offset = va->gem.offset;
1089*e6303f32SDanilo Krummrich 		u64 addr = va->va.addr;
1090*e6303f32SDanilo Krummrich 		u64 range = va->va.range;
1091*e6303f32SDanilo Krummrich 		u64 end = addr + range;
1092*e6303f32SDanilo Krummrich 		bool merge = !!va->gem.obj;
1093*e6303f32SDanilo Krummrich 
1094*e6303f32SDanilo Krummrich 		if (addr == req_addr) {
1095*e6303f32SDanilo Krummrich 			merge &= obj == req_obj &&
1096*e6303f32SDanilo Krummrich 				 offset == req_offset;
1097*e6303f32SDanilo Krummrich 
1098*e6303f32SDanilo Krummrich 			if (end == req_end) {
1099*e6303f32SDanilo Krummrich 				ret = op_unmap_cb(ops, priv, va, merge);
1100*e6303f32SDanilo Krummrich 				if (ret)
1101*e6303f32SDanilo Krummrich 					return ret;
1102*e6303f32SDanilo Krummrich 				break;
1103*e6303f32SDanilo Krummrich 			}
1104*e6303f32SDanilo Krummrich 
1105*e6303f32SDanilo Krummrich 			if (end < req_end) {
1106*e6303f32SDanilo Krummrich 				ret = op_unmap_cb(ops, priv, va, merge);
1107*e6303f32SDanilo Krummrich 				if (ret)
1108*e6303f32SDanilo Krummrich 					return ret;
1109*e6303f32SDanilo Krummrich 				goto next;
1110*e6303f32SDanilo Krummrich 			}
1111*e6303f32SDanilo Krummrich 
1112*e6303f32SDanilo Krummrich 			if (end > req_end) {
1113*e6303f32SDanilo Krummrich 				struct drm_gpuva_op_map n = {
1114*e6303f32SDanilo Krummrich 					.va.addr = req_end,
1115*e6303f32SDanilo Krummrich 					.va.range = range - req_range,
1116*e6303f32SDanilo Krummrich 					.gem.obj = obj,
1117*e6303f32SDanilo Krummrich 					.gem.offset = offset + req_range,
1118*e6303f32SDanilo Krummrich 				};
1119*e6303f32SDanilo Krummrich 				struct drm_gpuva_op_unmap u = {
1120*e6303f32SDanilo Krummrich 					.va = va,
1121*e6303f32SDanilo Krummrich 					.keep = merge,
1122*e6303f32SDanilo Krummrich 				};
1123*e6303f32SDanilo Krummrich 
1124*e6303f32SDanilo Krummrich 				ret = op_remap_cb(ops, priv, NULL, &n, &u);
1125*e6303f32SDanilo Krummrich 				if (ret)
1126*e6303f32SDanilo Krummrich 					return ret;
1127*e6303f32SDanilo Krummrich 				break;
1128*e6303f32SDanilo Krummrich 			}
1129*e6303f32SDanilo Krummrich 		} else if (addr < req_addr) {
1130*e6303f32SDanilo Krummrich 			u64 ls_range = req_addr - addr;
1131*e6303f32SDanilo Krummrich 			struct drm_gpuva_op_map p = {
1132*e6303f32SDanilo Krummrich 				.va.addr = addr,
1133*e6303f32SDanilo Krummrich 				.va.range = ls_range,
1134*e6303f32SDanilo Krummrich 				.gem.obj = obj,
1135*e6303f32SDanilo Krummrich 				.gem.offset = offset,
1136*e6303f32SDanilo Krummrich 			};
1137*e6303f32SDanilo Krummrich 			struct drm_gpuva_op_unmap u = { .va = va };
1138*e6303f32SDanilo Krummrich 
1139*e6303f32SDanilo Krummrich 			merge &= obj == req_obj &&
1140*e6303f32SDanilo Krummrich 				 offset + ls_range == req_offset;
1141*e6303f32SDanilo Krummrich 			u.keep = merge;
1142*e6303f32SDanilo Krummrich 
1143*e6303f32SDanilo Krummrich 			if (end == req_end) {
1144*e6303f32SDanilo Krummrich 				ret = op_remap_cb(ops, priv, &p, NULL, &u);
1145*e6303f32SDanilo Krummrich 				if (ret)
1146*e6303f32SDanilo Krummrich 					return ret;
1147*e6303f32SDanilo Krummrich 				break;
1148*e6303f32SDanilo Krummrich 			}
1149*e6303f32SDanilo Krummrich 
1150*e6303f32SDanilo Krummrich 			if (end < req_end) {
1151*e6303f32SDanilo Krummrich 				ret = op_remap_cb(ops, priv, &p, NULL, &u);
1152*e6303f32SDanilo Krummrich 				if (ret)
1153*e6303f32SDanilo Krummrich 					return ret;
1154*e6303f32SDanilo Krummrich 				goto next;
1155*e6303f32SDanilo Krummrich 			}
1156*e6303f32SDanilo Krummrich 
1157*e6303f32SDanilo Krummrich 			if (end > req_end) {
1158*e6303f32SDanilo Krummrich 				struct drm_gpuva_op_map n = {
1159*e6303f32SDanilo Krummrich 					.va.addr = req_end,
1160*e6303f32SDanilo Krummrich 					.va.range = end - req_end,
1161*e6303f32SDanilo Krummrich 					.gem.obj = obj,
1162*e6303f32SDanilo Krummrich 					.gem.offset = offset + ls_range +
1163*e6303f32SDanilo Krummrich 						      req_range,
1164*e6303f32SDanilo Krummrich 				};
1165*e6303f32SDanilo Krummrich 
1166*e6303f32SDanilo Krummrich 				ret = op_remap_cb(ops, priv, &p, &n, &u);
1167*e6303f32SDanilo Krummrich 				if (ret)
1168*e6303f32SDanilo Krummrich 					return ret;
1169*e6303f32SDanilo Krummrich 				break;
1170*e6303f32SDanilo Krummrich 			}
1171*e6303f32SDanilo Krummrich 		} else if (addr > req_addr) {
1172*e6303f32SDanilo Krummrich 			merge &= obj == req_obj &&
1173*e6303f32SDanilo Krummrich 				 offset == req_offset +
1174*e6303f32SDanilo Krummrich 					   (addr - req_addr);
1175*e6303f32SDanilo Krummrich 
1176*e6303f32SDanilo Krummrich 			if (end == req_end) {
1177*e6303f32SDanilo Krummrich 				ret = op_unmap_cb(ops, priv, va, merge);
1178*e6303f32SDanilo Krummrich 				if (ret)
1179*e6303f32SDanilo Krummrich 					return ret;
1180*e6303f32SDanilo Krummrich 				break;
1181*e6303f32SDanilo Krummrich 			}
1182*e6303f32SDanilo Krummrich 
1183*e6303f32SDanilo Krummrich 			if (end < req_end) {
1184*e6303f32SDanilo Krummrich 				ret = op_unmap_cb(ops, priv, va, merge);
1185*e6303f32SDanilo Krummrich 				if (ret)
1186*e6303f32SDanilo Krummrich 					return ret;
1187*e6303f32SDanilo Krummrich 				goto next;
1188*e6303f32SDanilo Krummrich 			}
1189*e6303f32SDanilo Krummrich 
1190*e6303f32SDanilo Krummrich 			if (end > req_end) {
1191*e6303f32SDanilo Krummrich 				struct drm_gpuva_op_map n = {
1192*e6303f32SDanilo Krummrich 					.va.addr = req_end,
1193*e6303f32SDanilo Krummrich 					.va.range = end - req_end,
1194*e6303f32SDanilo Krummrich 					.gem.obj = obj,
1195*e6303f32SDanilo Krummrich 					.gem.offset = offset + req_end - addr,
1196*e6303f32SDanilo Krummrich 				};
1197*e6303f32SDanilo Krummrich 				struct drm_gpuva_op_unmap u = {
1198*e6303f32SDanilo Krummrich 					.va = va,
1199*e6303f32SDanilo Krummrich 					.keep = merge,
1200*e6303f32SDanilo Krummrich 				};
1201*e6303f32SDanilo Krummrich 
1202*e6303f32SDanilo Krummrich 				ret = op_remap_cb(ops, priv, NULL, &n, &u);
1203*e6303f32SDanilo Krummrich 				if (ret)
1204*e6303f32SDanilo Krummrich 					return ret;
1205*e6303f32SDanilo Krummrich 				break;
1206*e6303f32SDanilo Krummrich 			}
1207*e6303f32SDanilo Krummrich 		}
1208*e6303f32SDanilo Krummrich next:
1209*e6303f32SDanilo Krummrich 		prev = va;
1210*e6303f32SDanilo Krummrich 	}
1211*e6303f32SDanilo Krummrich 
1212*e6303f32SDanilo Krummrich 	return op_map_cb(ops, priv,
1213*e6303f32SDanilo Krummrich 			 req_addr, req_range,
1214*e6303f32SDanilo Krummrich 			 req_obj, req_offset);
1215*e6303f32SDanilo Krummrich }
1216*e6303f32SDanilo Krummrich 
1217*e6303f32SDanilo Krummrich static int
1218*e6303f32SDanilo Krummrich __drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr,
1219*e6303f32SDanilo Krummrich 		     const struct drm_gpuva_fn_ops *ops, void *priv,
1220*e6303f32SDanilo Krummrich 		     u64 req_addr, u64 req_range)
1221*e6303f32SDanilo Krummrich {
1222*e6303f32SDanilo Krummrich 	struct drm_gpuva *va, *next;
1223*e6303f32SDanilo Krummrich 	u64 req_end = req_addr + req_range;
1224*e6303f32SDanilo Krummrich 	int ret;
1225*e6303f32SDanilo Krummrich 
1226*e6303f32SDanilo Krummrich 	if (unlikely(!drm_gpuva_range_valid(mgr, req_addr, req_range)))
1227*e6303f32SDanilo Krummrich 		return -EINVAL;
1228*e6303f32SDanilo Krummrich 
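	/*
	 * Walk all mappings overlapping the requested range; mappings sticking
	 * out at the front and/or back are split via a remap step, all others
	 * are unmapped entirely.
	 */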
1229*e6303f32SDanilo Krummrich 	drm_gpuva_for_each_va_range_safe(va, next, mgr, req_addr, req_end) {
1230*e6303f32SDanilo Krummrich 		struct drm_gpuva_op_map prev_op = {}, next_op = {};
1231*e6303f32SDanilo Krummrich 		bool prev_split = false, next_split = false;
1232*e6303f32SDanilo Krummrich 		struct drm_gem_object *obj = va->gem.obj;
1233*e6303f32SDanilo Krummrich 		u64 offset = va->gem.offset;
1234*e6303f32SDanilo Krummrich 		u64 addr = va->va.addr;
1235*e6303f32SDanilo Krummrich 		u64 range = va->va.range;
1236*e6303f32SDanilo Krummrich 		u64 end = addr + range;
1237*e6303f32SDanilo Krummrich 
1238*e6303f32SDanilo Krummrich 		if (addr < req_addr) {
1239*e6303f32SDanilo Krummrich 			prev_op.va.addr = addr;
1240*e6303f32SDanilo Krummrich 			prev_op.va.range = req_addr - addr;
1241*e6303f32SDanilo Krummrich 			prev_op.gem.obj = obj;
1242*e6303f32SDanilo Krummrich 			prev_op.gem.offset = offset;
1243*e6303f32SDanilo Krummrich 
1244*e6303f32SDanilo Krummrich 			prev_split = true;
1245*e6303f32SDanilo Krummrich 		}
1246*e6303f32SDanilo Krummrich 
1247*e6303f32SDanilo Krummrich 		if (end > req_end) {
1248*e6303f32SDanilo Krummrich 			next_op.va.addr = req_end;
1249*e6303f32SDanilo Krummrich 			next_op.va.range = end - req_end;
1250*e6303f32SDanilo Krummrich 			next_op.gem.obj = obj;
1251*e6303f32SDanilo Krummrich 			next_op.gem.offset = offset + (req_end - addr);
1252*e6303f32SDanilo Krummrich 
1253*e6303f32SDanilo Krummrich 			next_split = true;
1254*e6303f32SDanilo Krummrich 		}
1255*e6303f32SDanilo Krummrich 
1256*e6303f32SDanilo Krummrich 		if (prev_split || next_split) {
1257*e6303f32SDanilo Krummrich 			struct drm_gpuva_op_unmap unmap = { .va = va };
1258*e6303f32SDanilo Krummrich 
1259*e6303f32SDanilo Krummrich 			ret = op_remap_cb(ops, priv,
1260*e6303f32SDanilo Krummrich 					  prev_split ? &prev_op : NULL,
1261*e6303f32SDanilo Krummrich 					  next_split ? &next_op : NULL,
1262*e6303f32SDanilo Krummrich 					  &unmap);
1263*e6303f32SDanilo Krummrich 			if (ret)
1264*e6303f32SDanilo Krummrich 				return ret;
1265*e6303f32SDanilo Krummrich 		} else {
1266*e6303f32SDanilo Krummrich 			ret = op_unmap_cb(ops, priv, va, false);
1267*e6303f32SDanilo Krummrich 			if (ret)
1268*e6303f32SDanilo Krummrich 				return ret;
1269*e6303f32SDanilo Krummrich 		}
1270*e6303f32SDanilo Krummrich 	}
1271*e6303f32SDanilo Krummrich 
1272*e6303f32SDanilo Krummrich 	return 0;
1273*e6303f32SDanilo Krummrich }
1274*e6303f32SDanilo Krummrich 
1275*e6303f32SDanilo Krummrich /**
1276*e6303f32SDanilo Krummrich  * drm_gpuva_sm_map() - creates the &drm_gpuva_op split/merge steps
1277*e6303f32SDanilo Krummrich  * @mgr: the &drm_gpuva_manager representing the GPU VA space
1278*e6303f32SDanilo Krummrich  * @req_addr: the start address of the new mapping
1279*e6303f32SDanilo Krummrich  * @req_range: the range of the new mapping
1280*e6303f32SDanilo Krummrich  * @req_obj: the &drm_gem_object to map
1281*e6303f32SDanilo Krummrich  * @req_offset: the offset within the &drm_gem_object
1282*e6303f32SDanilo Krummrich  * @priv: pointer to a driver private data structure
1283*e6303f32SDanilo Krummrich  *
1284*e6303f32SDanilo Krummrich  * This function iterates the given range of the GPU VA space. It utilizes the
1285*e6303f32SDanilo Krummrich  * &drm_gpuva_fn_ops to call back into the driver providing the split and merge
1286*e6303f32SDanilo Krummrich  * steps.
1287*e6303f32SDanilo Krummrich  *
1288*e6303f32SDanilo Krummrich  * Drivers may use these callbacks to update the GPU VA space right away within
1289*e6303f32SDanilo Krummrich  * the callback. In case the driver decides to copy and store the operations for
1290*e6303f32SDanilo Krummrich  * later processing, neither this function nor &drm_gpuva_sm_unmap may be
1291*e6303f32SDanilo Krummrich  * called before the &drm_gpuva_manager's view of the GPU VA space has been
1292*e6303f32SDanilo Krummrich  * updated with the previous set of operations. To update the
1293*e6303f32SDanilo Krummrich  * &drm_gpuva_manager's view of the GPU VA space, drm_gpuva_insert(),
1294*e6303f32SDanilo Krummrich  * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
1295*e6303f32SDanilo Krummrich  * used.
1296*e6303f32SDanilo Krummrich  *
1297*e6303f32SDanilo Krummrich  * A sequence of callbacks can contain map, unmap and remap operations, but it
1298*e6303f32SDanilo Krummrich  * might also be empty if no operation is required, e.g. if the requested
1299*e6303f32SDanilo Krummrich  * mapping already exists in the exact same way.
1300*e6303f32SDanilo Krummrich  *
1301*e6303f32SDanilo Krummrich  * There can be an arbitrary number of unmap operations, a maximum of two remap
1302*e6303f32SDanilo Krummrich  * operations and a single map operation. The latter represents the original
1303*e6303f32SDanilo Krummrich  * map operation requested by the caller.
1304*e6303f32SDanilo Krummrich  *
1305*e6303f32SDanilo Krummrich  * Returns: 0 on success or a negative error code
1306*e6303f32SDanilo Krummrich  */
1307*e6303f32SDanilo Krummrich int
1308*e6303f32SDanilo Krummrich drm_gpuva_sm_map(struct drm_gpuva_manager *mgr, void *priv,
1309*e6303f32SDanilo Krummrich 		 u64 req_addr, u64 req_range,
1310*e6303f32SDanilo Krummrich 		 struct drm_gem_object *req_obj, u64 req_offset)
1311*e6303f32SDanilo Krummrich {
1312*e6303f32SDanilo Krummrich 	const struct drm_gpuva_fn_ops *ops = mgr->ops;
1313*e6303f32SDanilo Krummrich 
1314*e6303f32SDanilo Krummrich 	if (unlikely(!(ops && ops->sm_step_map &&
1315*e6303f32SDanilo Krummrich 		       ops->sm_step_remap &&
1316*e6303f32SDanilo Krummrich 		       ops->sm_step_unmap)))
1317*e6303f32SDanilo Krummrich 		return -EINVAL;
1318*e6303f32SDanilo Krummrich 
1319*e6303f32SDanilo Krummrich 	return __drm_gpuva_sm_map(mgr, ops, priv,
1320*e6303f32SDanilo Krummrich 				  req_addr, req_range,
1321*e6303f32SDanilo Krummrich 				  req_obj, req_offset);
1322*e6303f32SDanilo Krummrich }
1323*e6303f32SDanilo Krummrich EXPORT_SYMBOL_GPL(drm_gpuva_sm_map);
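
/*
 * Usage sketch: wiring the split/merge steps and running them for a map
 * request. The locking helpers and ctx are hypothetical; only the drm_gpuva_*
 * symbols are real. The step callbacks are the ones sketched above.
 *
 *	static const struct drm_gpuva_fn_ops driver_gpuva_ops = {
 *		.sm_step_map = driver_gpuva_map,
 *		.sm_step_remap = driver_gpuva_remap,
 *		.sm_step_unmap = driver_gpuva_unmap,
 *	};
 *
 *	// with mgr->ops == &driver_gpuva_ops
 *	driver_lock_va_space();
 *	ret = drm_gpuva_sm_map(mgr, &ctx, addr, range, obj, offset);
 *	driver_unlock_va_space();
 */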
1324*e6303f32SDanilo Krummrich 
1325*e6303f32SDanilo Krummrich /**
1326*e6303f32SDanilo Krummrich  * drm_gpuva_sm_unmap() - creates the &drm_gpuva_op split steps on unmap
1327*e6303f32SDanilo Krummrich  * @mgr: the &drm_gpuva_manager representing the GPU VA space
1328*e6303f32SDanilo Krummrich  * @priv: pointer to a driver private data structure
1329*e6303f32SDanilo Krummrich  * @req_addr: the start address of the range to unmap
1330*e6303f32SDanilo Krummrich  * @req_range: the range of the mappings to unmap
1331*e6303f32SDanilo Krummrich  *
1332*e6303f32SDanilo Krummrich  * This function iterates the given range of the GPU VA space. It utilizes the
1333*e6303f32SDanilo Krummrich  * &drm_gpuva_fn_ops to call back into the driver providing the operations to
1334*e6303f32SDanilo Krummrich  * unmap and, if required, split existent mappings.
1335*e6303f32SDanilo Krummrich  *
1336*e6303f32SDanilo Krummrich  * Drivers may use these callbacks to update the GPU VA space right away within
1337*e6303f32SDanilo Krummrich  * the callback. In case the driver decides to copy and store the operations for
1338*e6303f32SDanilo Krummrich  * later processing, neither this function nor &drm_gpuva_sm_map may be called
1339*e6303f32SDanilo Krummrich  * before the &drm_gpuva_manager's view of the GPU VA space has been updated
1340*e6303f32SDanilo Krummrich  * with the previous set of operations. To update the &drm_gpuva_manager's view
1341*e6303f32SDanilo Krummrich  * of the GPU VA space, drm_gpuva_insert(), drm_gpuva_destroy_locked() and/or
1342*e6303f32SDanilo Krummrich  * drm_gpuva_destroy_unlocked() should be used.
1343*e6303f32SDanilo Krummrich  *
1344*e6303f32SDanilo Krummrich  * A sequence of callbacks can contain unmap and remap operations, depending on
1345*e6303f32SDanilo Krummrich  * whether there are actual overlapping mappings to split.
1346*e6303f32SDanilo Krummrich  *
1347*e6303f32SDanilo Krummrich  * There can be an arbitrary number of unmap operations and a maximum of two
1348*e6303f32SDanilo Krummrich  * remap operations.
1349*e6303f32SDanilo Krummrich  *
1350*e6303f32SDanilo Krummrich  * Returns: 0 on success or a negative error code
1351*e6303f32SDanilo Krummrich  */
1352*e6303f32SDanilo Krummrich int
1353*e6303f32SDanilo Krummrich drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr, void *priv,
1354*e6303f32SDanilo Krummrich 		   u64 req_addr, u64 req_range)
1355*e6303f32SDanilo Krummrich {
1356*e6303f32SDanilo Krummrich 	const struct drm_gpuva_fn_ops *ops = mgr->ops;
1357*e6303f32SDanilo Krummrich 
1358*e6303f32SDanilo Krummrich 	if (unlikely(!(ops && ops->sm_step_remap &&
1359*e6303f32SDanilo Krummrich 		       ops->sm_step_unmap)))
1360*e6303f32SDanilo Krummrich 		return -EINVAL;
1361*e6303f32SDanilo Krummrich 
1362*e6303f32SDanilo Krummrich 	return __drm_gpuva_sm_unmap(mgr, ops, priv,
1363*e6303f32SDanilo Krummrich 				    req_addr, req_range);
1364*e6303f32SDanilo Krummrich }
1365*e6303f32SDanilo Krummrich EXPORT_SYMBOL_GPL(drm_gpuva_sm_unmap);
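
/*
 * Usage sketch, continuing the example above. Note that only .sm_step_remap
 * and .sm_step_unmap are required here; an unmap request can never produce a
 * map step.
 *
 *	driver_lock_va_space();
 *	ret = drm_gpuva_sm_unmap(mgr, &ctx, addr, range);
 *	driver_unlock_va_space();
 */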
1366*e6303f32SDanilo Krummrich 
1367*e6303f32SDanilo Krummrich static struct drm_gpuva_op *
1368*e6303f32SDanilo Krummrich gpuva_op_alloc(struct drm_gpuva_manager *mgr)
1369*e6303f32SDanilo Krummrich {
1370*e6303f32SDanilo Krummrich 	const struct drm_gpuva_fn_ops *fn = mgr->ops;
1371*e6303f32SDanilo Krummrich 	struct drm_gpuva_op *op;
1372*e6303f32SDanilo Krummrich 
1373*e6303f32SDanilo Krummrich 	if (fn && fn->op_alloc)
1374*e6303f32SDanilo Krummrich 		op = fn->op_alloc();
1375*e6303f32SDanilo Krummrich 	else
1376*e6303f32SDanilo Krummrich 		op = kzalloc(sizeof(*op), GFP_KERNEL);
1377*e6303f32SDanilo Krummrich 
1378*e6303f32SDanilo Krummrich 	return op;
1382*e6303f32SDanilo Krummrich }
1383*e6303f32SDanilo Krummrich 
1384*e6303f32SDanilo Krummrich static void
1385*e6303f32SDanilo Krummrich gpuva_op_free(struct drm_gpuva_manager *mgr,
1386*e6303f32SDanilo Krummrich 	      struct drm_gpuva_op *op)
1387*e6303f32SDanilo Krummrich {
1388*e6303f32SDanilo Krummrich 	const struct drm_gpuva_fn_ops *fn = mgr->ops;
1389*e6303f32SDanilo Krummrich 
1390*e6303f32SDanilo Krummrich 	if (fn && fn->op_free)
1391*e6303f32SDanilo Krummrich 		fn->op_free(op);
1392*e6303f32SDanilo Krummrich 	else
1393*e6303f32SDanilo Krummrich 		kfree(op);
1394*e6303f32SDanilo Krummrich }
1395*e6303f32SDanilo Krummrich 
1396*e6303f32SDanilo Krummrich static int
1397*e6303f32SDanilo Krummrich drm_gpuva_sm_step(struct drm_gpuva_op *__op,
1398*e6303f32SDanilo Krummrich 		  void *priv)
1399*e6303f32SDanilo Krummrich {
1400*e6303f32SDanilo Krummrich 	struct {
1401*e6303f32SDanilo Krummrich 		struct drm_gpuva_manager *mgr;
1402*e6303f32SDanilo Krummrich 		struct drm_gpuva_ops *ops;
1403*e6303f32SDanilo Krummrich 	} *args = priv;
1404*e6303f32SDanilo Krummrich 	struct drm_gpuva_manager *mgr = args->mgr;
1405*e6303f32SDanilo Krummrich 	struct drm_gpuva_ops *ops = args->ops;
1406*e6303f32SDanilo Krummrich 	struct drm_gpuva_op *op;
1407*e6303f32SDanilo Krummrich 
1408*e6303f32SDanilo Krummrich 	op = gpuva_op_alloc(mgr);
1409*e6303f32SDanilo Krummrich 	if (unlikely(!op))
1410*e6303f32SDanilo Krummrich 		goto err;
1411*e6303f32SDanilo Krummrich 
1412*e6303f32SDanilo Krummrich 	memcpy(op, __op, sizeof(*op));
1413*e6303f32SDanilo Krummrich 
1414*e6303f32SDanilo Krummrich 	if (op->op == DRM_GPUVA_OP_REMAP) {
1415*e6303f32SDanilo Krummrich 		struct drm_gpuva_op_remap *__r = &__op->remap;
1416*e6303f32SDanilo Krummrich 		struct drm_gpuva_op_remap *r = &op->remap;
1417*e6303f32SDanilo Krummrich 
1418*e6303f32SDanilo Krummrich 		r->unmap = kmemdup(__r->unmap, sizeof(*r->unmap),
1419*e6303f32SDanilo Krummrich 				   GFP_KERNEL);
1420*e6303f32SDanilo Krummrich 		if (unlikely(!r->unmap))
1421*e6303f32SDanilo Krummrich 			goto err_free_op;
1422*e6303f32SDanilo Krummrich 
1423*e6303f32SDanilo Krummrich 		if (__r->prev) {
1424*e6303f32SDanilo Krummrich 			r->prev = kmemdup(__r->prev, sizeof(*r->prev),
1425*e6303f32SDanilo Krummrich 					  GFP_KERNEL);
1426*e6303f32SDanilo Krummrich 			if (unlikely(!r->prev))
1427*e6303f32SDanilo Krummrich 				goto err_free_unmap;
1428*e6303f32SDanilo Krummrich 		}
1429*e6303f32SDanilo Krummrich 
1430*e6303f32SDanilo Krummrich 		if (__r->next) {
1431*e6303f32SDanilo Krummrich 			r->next = kmemdup(__r->next, sizeof(*r->next),
1432*e6303f32SDanilo Krummrich 					  GFP_KERNEL);
1433*e6303f32SDanilo Krummrich 			if (unlikely(!r->next))
1434*e6303f32SDanilo Krummrich 				goto err_free_prev;
1435*e6303f32SDanilo Krummrich 		}
1436*e6303f32SDanilo Krummrich 	}
1437*e6303f32SDanilo Krummrich 
1438*e6303f32SDanilo Krummrich 	list_add_tail(&op->entry, &ops->list);
1439*e6303f32SDanilo Krummrich 
1440*e6303f32SDanilo Krummrich 	return 0;
1441*e6303f32SDanilo Krummrich 
1442*e6303f32SDanilo Krummrich err_free_prev:
1443*e6303f32SDanilo Krummrich 	kfree(op->remap.prev);
1444*e6303f32SDanilo Krummrich err_free_unmap:
1445*e6303f32SDanilo Krummrich 	kfree(op->remap.unmap);
1446*e6303f32SDanilo Krummrich err_free_op:
1447*e6303f32SDanilo Krummrich 	gpuva_op_free(mgr, op);
1448*e6303f32SDanilo Krummrich err:
1449*e6303f32SDanilo Krummrich 	return -ENOMEM;
1450*e6303f32SDanilo Krummrich }
1451*e6303f32SDanilo Krummrich 
1452*e6303f32SDanilo Krummrich static const struct drm_gpuva_fn_ops gpuva_list_ops = {
1453*e6303f32SDanilo Krummrich 	.sm_step_map = drm_gpuva_sm_step,
1454*e6303f32SDanilo Krummrich 	.sm_step_remap = drm_gpuva_sm_step,
1455*e6303f32SDanilo Krummrich 	.sm_step_unmap = drm_gpuva_sm_step,
1456*e6303f32SDanilo Krummrich };
1457*e6303f32SDanilo Krummrich 
1458*e6303f32SDanilo Krummrich /**
1459*e6303f32SDanilo Krummrich  * drm_gpuva_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
1460*e6303f32SDanilo Krummrich  * @mgr: the &drm_gpuva_manager representing the GPU VA space
1461*e6303f32SDanilo Krummrich  * @req_addr: the start address of the new mapping
1462*e6303f32SDanilo Krummrich  * @req_range: the range of the new mapping
1463*e6303f32SDanilo Krummrich  * @req_obj: the &drm_gem_object to map
1464*e6303f32SDanilo Krummrich  * @req_offset: the offset within the &drm_gem_object
1465*e6303f32SDanilo Krummrich  *
1466*e6303f32SDanilo Krummrich  * This function creates a list of operations to perform splitting and merging
1467*e6303f32SDanilo Krummrich  * of existent mapping(s) with the newly requested one.
1468*e6303f32SDanilo Krummrich  *
1469*e6303f32SDanilo Krummrich  * The list can be iterated with &drm_gpuva_for_each_op and must be processed
1470*e6303f32SDanilo Krummrich  * in the given order. It can contain map, unmap and remap operations, but it
1471*e6303f32SDanilo Krummrich  * can also be empty if no operation is required, e.g. if the requested mapping
1472*e6303f32SDanilo Krummrich  * already exists in the exact same way.
1473*e6303f32SDanilo Krummrich  *
1474*e6303f32SDanilo Krummrich  * There can be an arbitrary number of unmap operations, a maximum of two remap
1475*e6303f32SDanilo Krummrich  * operations and a single map operation. The latter represents the original
1476*e6303f32SDanilo Krummrich  * map operation requested by the caller.
1477*e6303f32SDanilo Krummrich  *
1478*e6303f32SDanilo Krummrich  * Note that before calling this function again with another mapping request, it
1479*e6303f32SDanilo Krummrich  * is necessary to update the &drm_gpuva_manager's view of the GPU VA space. The
1480*e6303f32SDanilo Krummrich  * previously obtained operations must be either processed or abandoned. To
1481*e6303f32SDanilo Krummrich  * update the &drm_gpuva_manager's view of the GPU VA space, drm_gpuva_insert(),
1482*e6303f32SDanilo Krummrich  * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
1483*e6303f32SDanilo Krummrich  * used.
1484*e6303f32SDanilo Krummrich  *
1485*e6303f32SDanilo Krummrich  * After the caller has finished processing the returned &drm_gpuva_ops, they must
1486*e6303f32SDanilo Krummrich  * be freed with &drm_gpuva_ops_free.
1487*e6303f32SDanilo Krummrich  *
1488*e6303f32SDanilo Krummrich  * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
1489*e6303f32SDanilo Krummrich  */
1490*e6303f32SDanilo Krummrich struct drm_gpuva_ops *
1491*e6303f32SDanilo Krummrich drm_gpuva_sm_map_ops_create(struct drm_gpuva_manager *mgr,
1492*e6303f32SDanilo Krummrich 			    u64 req_addr, u64 req_range,
1493*e6303f32SDanilo Krummrich 			    struct drm_gem_object *req_obj, u64 req_offset)
1494*e6303f32SDanilo Krummrich {
1495*e6303f32SDanilo Krummrich 	struct drm_gpuva_ops *ops;
1496*e6303f32SDanilo Krummrich 	struct {
1497*e6303f32SDanilo Krummrich 		struct drm_gpuva_manager *mgr;
1498*e6303f32SDanilo Krummrich 		struct drm_gpuva_ops *ops;
1499*e6303f32SDanilo Krummrich 	} args;
1500*e6303f32SDanilo Krummrich 	int ret;
1501*e6303f32SDanilo Krummrich 
1502*e6303f32SDanilo Krummrich 	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
1503*e6303f32SDanilo Krummrich 	if (unlikely(!ops))
1504*e6303f32SDanilo Krummrich 		return ERR_PTR(-ENOMEM);
1505*e6303f32SDanilo Krummrich 
1506*e6303f32SDanilo Krummrich 	INIT_LIST_HEAD(&ops->list);
1507*e6303f32SDanilo Krummrich 
1508*e6303f32SDanilo Krummrich 	args.mgr = mgr;
1509*e6303f32SDanilo Krummrich 	args.ops = ops;
1510*e6303f32SDanilo Krummrich 
1511*e6303f32SDanilo Krummrich 	ret = __drm_gpuva_sm_map(mgr, &gpuva_list_ops, &args,
1512*e6303f32SDanilo Krummrich 				 req_addr, req_range,
1513*e6303f32SDanilo Krummrich 				 req_obj, req_offset);
1514*e6303f32SDanilo Krummrich 	if (ret)
1515*e6303f32SDanilo Krummrich 		goto err_free_ops;
1516*e6303f32SDanilo Krummrich 
1517*e6303f32SDanilo Krummrich 	return ops;
1518*e6303f32SDanilo Krummrich 
1519*e6303f32SDanilo Krummrich err_free_ops:
1520*e6303f32SDanilo Krummrich 	drm_gpuva_ops_free(mgr, ops);
1521*e6303f32SDanilo Krummrich 	return ERR_PTR(ret);
1522*e6303f32SDanilo Krummrich }
1523*e6303f32SDanilo Krummrich EXPORT_SYMBOL_GPL(drm_gpuva_sm_map_ops_create);
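
/*
 * Usage sketch: processing the returned list; the driver_handle_*() helpers
 * are hypothetical.
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *	int ret = 0;
 *
 *	ops = drm_gpuva_sm_map_ops_create(mgr, addr, range, obj, offset);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops) {
 *		switch (op->op) {
 *		case DRM_GPUVA_OP_MAP:
 *			ret = driver_handle_map(op);
 *			break;
 *		case DRM_GPUVA_OP_REMAP:
 *			ret = driver_handle_remap(op);
 *			break;
 *		case DRM_GPUVA_OP_UNMAP:
 *			ret = driver_handle_unmap(op);
 *			break;
 *		default:
 *			ret = -EINVAL;
 *			break;
 *		}
 *		if (ret)
 *			break;
 *	}
 *
 *	drm_gpuva_ops_free(mgr, ops);
 */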
1524*e6303f32SDanilo Krummrich 
1525*e6303f32SDanilo Krummrich /**
1526*e6303f32SDanilo Krummrich  * drm_gpuva_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
1527*e6303f32SDanilo Krummrich  * unmap
1528*e6303f32SDanilo Krummrich  * @mgr: the &drm_gpuva_manager representing the GPU VA space
1529*e6303f32SDanilo Krummrich  * @req_addr: the start address of the range to unmap
1530*e6303f32SDanilo Krummrich  * @req_range: the range of the mappings to unmap
1531*e6303f32SDanilo Krummrich  *
1532*e6303f32SDanilo Krummrich  * This function creates a list of operations to perform unmapping and, if
1533*e6303f32SDanilo Krummrich  * required, splitting of the mappings overlapping the unmap range.
1534*e6303f32SDanilo Krummrich  *
1535*e6303f32SDanilo Krummrich  * The list can be iterated with &drm_gpuva_for_each_op and must be processed
1536*e6303f32SDanilo Krummrich  * in the given order. It can contain unmap and remap operations, depending on
1537*e6303f32SDanilo Krummrich  * whether there are actual overlapping mappings to split.
1538*e6303f32SDanilo Krummrich  *
1539*e6303f32SDanilo Krummrich  * There can be an arbitrary number of unmap operations and a maximum of two
1540*e6303f32SDanilo Krummrich  * remap operations.
1541*e6303f32SDanilo Krummrich  *
1542*e6303f32SDanilo Krummrich  * Note that before calling this function again with another range to unmap, it
1543*e6303f32SDanilo Krummrich  * is necessary to update the &drm_gpuva_manager's view of the GPU VA space. The
1544*e6303f32SDanilo Krummrich  * previously obtained operations must be processed or abandoned. To update the
1545*e6303f32SDanilo Krummrich  * &drm_gpuva_manager's view of the GPU VA space, drm_gpuva_insert(),
1546*e6303f32SDanilo Krummrich  * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
1547*e6303f32SDanilo Krummrich  * used.
1548*e6303f32SDanilo Krummrich  *
1549*e6303f32SDanilo Krummrich  * After the caller has finished processing the returned &drm_gpuva_ops, they must
1550*e6303f32SDanilo Krummrich  * be freed with &drm_gpuva_ops_free.
1551*e6303f32SDanilo Krummrich  *
1552*e6303f32SDanilo Krummrich  * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
1553*e6303f32SDanilo Krummrich  */
1554*e6303f32SDanilo Krummrich struct drm_gpuva_ops *
1555*e6303f32SDanilo Krummrich drm_gpuva_sm_unmap_ops_create(struct drm_gpuva_manager *mgr,
1556*e6303f32SDanilo Krummrich 			      u64 req_addr, u64 req_range)
1557*e6303f32SDanilo Krummrich {
1558*e6303f32SDanilo Krummrich 	struct drm_gpuva_ops *ops;
1559*e6303f32SDanilo Krummrich 	struct {
1560*e6303f32SDanilo Krummrich 		struct drm_gpuva_manager *mgr;
1561*e6303f32SDanilo Krummrich 		struct drm_gpuva_ops *ops;
1562*e6303f32SDanilo Krummrich 	} args;
1563*e6303f32SDanilo Krummrich 	int ret;
1564*e6303f32SDanilo Krummrich 
1565*e6303f32SDanilo Krummrich 	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
1566*e6303f32SDanilo Krummrich 	if (unlikely(!ops))
1567*e6303f32SDanilo Krummrich 		return ERR_PTR(-ENOMEM);
1568*e6303f32SDanilo Krummrich 
1569*e6303f32SDanilo Krummrich 	INIT_LIST_HEAD(&ops->list);
1570*e6303f32SDanilo Krummrich 
1571*e6303f32SDanilo Krummrich 	args.mgr = mgr;
1572*e6303f32SDanilo Krummrich 	args.ops = ops;
1573*e6303f32SDanilo Krummrich 
1574*e6303f32SDanilo Krummrich 	ret = __drm_gpuva_sm_unmap(mgr, &gpuva_list_ops, &args,
1575*e6303f32SDanilo Krummrich 				   req_addr, req_range);
1576*e6303f32SDanilo Krummrich 	if (ret)
1577*e6303f32SDanilo Krummrich 		goto err_free_ops;
1578*e6303f32SDanilo Krummrich 
1579*e6303f32SDanilo Krummrich 	return ops;
1580*e6303f32SDanilo Krummrich 
1581*e6303f32SDanilo Krummrich err_free_ops:
1582*e6303f32SDanilo Krummrich 	drm_gpuva_ops_free(mgr, ops);
1583*e6303f32SDanilo Krummrich 	return ERR_PTR(ret);
1584*e6303f32SDanilo Krummrich }
1585*e6303f32SDanilo Krummrich EXPORT_SYMBOL_GPL(drm_gpuva_sm_unmap_ops_create);
1586*e6303f32SDanilo Krummrich 
1587*e6303f32SDanilo Krummrich /**
1588*e6303f32SDanilo Krummrich  * drm_gpuva_prefetch_ops_create() - creates the &drm_gpuva_ops to prefetch
1589*e6303f32SDanilo Krummrich  * @mgr: the &drm_gpuva_manager representing the GPU VA space
1590*e6303f32SDanilo Krummrich  * @addr: the start address of the range to prefetch
1591*e6303f32SDanilo Krummrich  * @range: the range of the mappings to prefetch
1592*e6303f32SDanilo Krummrich  *
1593*e6303f32SDanilo Krummrich  * This function creates a list of operations to perform prefetching.
1594*e6303f32SDanilo Krummrich  *
1595*e6303f32SDanilo Krummrich  * The list can be iterated with &drm_gpuva_for_each_op and must be processed
1596*e6303f32SDanilo Krummrich  * in the given order. It contains an arbitrary number of prefetch operations,
1597*e6303f32SDanilo Krummrich  * one for each mapping found within the given range.
1599*e6303f32SDanilo Krummrich  *
1600*e6303f32SDanilo Krummrich  * After the caller has finished processing the returned &drm_gpuva_ops, they must
1601*e6303f32SDanilo Krummrich  * be freed with &drm_gpuva_ops_free.
1602*e6303f32SDanilo Krummrich  *
1603*e6303f32SDanilo Krummrich  * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
1604*e6303f32SDanilo Krummrich  */
1605*e6303f32SDanilo Krummrich struct drm_gpuva_ops *
1606*e6303f32SDanilo Krummrich drm_gpuva_prefetch_ops_create(struct drm_gpuva_manager *mgr,
1607*e6303f32SDanilo Krummrich 			      u64 addr, u64 range)
1608*e6303f32SDanilo Krummrich {
1609*e6303f32SDanilo Krummrich 	struct drm_gpuva_ops *ops;
1610*e6303f32SDanilo Krummrich 	struct drm_gpuva_op *op;
1611*e6303f32SDanilo Krummrich 	struct drm_gpuva *va;
1612*e6303f32SDanilo Krummrich 	u64 end = addr + range;
1613*e6303f32SDanilo Krummrich 	int ret;
1614*e6303f32SDanilo Krummrich 
1615*e6303f32SDanilo Krummrich 	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
1616*e6303f32SDanilo Krummrich 	if (!ops)
1617*e6303f32SDanilo Krummrich 		return ERR_PTR(-ENOMEM);
1618*e6303f32SDanilo Krummrich 
1619*e6303f32SDanilo Krummrich 	INIT_LIST_HEAD(&ops->list);
1620*e6303f32SDanilo Krummrich 
1621*e6303f32SDanilo Krummrich 	drm_gpuva_for_each_va_range(va, mgr, addr, end) {
1622*e6303f32SDanilo Krummrich 		op = gpuva_op_alloc(mgr);
1623*e6303f32SDanilo Krummrich 		if (!op) {
1624*e6303f32SDanilo Krummrich 			ret = -ENOMEM;
1625*e6303f32SDanilo Krummrich 			goto err_free_ops;
1626*e6303f32SDanilo Krummrich 		}
1627*e6303f32SDanilo Krummrich 
1628*e6303f32SDanilo Krummrich 		op->op = DRM_GPUVA_OP_PREFETCH;
1629*e6303f32SDanilo Krummrich 		op->prefetch.va = va;
1630*e6303f32SDanilo Krummrich 		list_add_tail(&op->entry, &ops->list);
1631*e6303f32SDanilo Krummrich 	}
1632*e6303f32SDanilo Krummrich 
1633*e6303f32SDanilo Krummrich 	return ops;
1634*e6303f32SDanilo Krummrich 
1635*e6303f32SDanilo Krummrich err_free_ops:
1636*e6303f32SDanilo Krummrich 	drm_gpuva_ops_free(mgr, ops);
1637*e6303f32SDanilo Krummrich 	return ERR_PTR(ret);
1638*e6303f32SDanilo Krummrich }
1639*e6303f32SDanilo Krummrich EXPORT_SYMBOL_GPL(drm_gpuva_prefetch_ops_create);
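
/*
 * Usage sketch; driver_prefetch() is a hypothetical helper.
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *
 *	ops = drm_gpuva_prefetch_ops_create(mgr, addr, range);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops)
 *		driver_prefetch(op->prefetch.va);
 *
 *	drm_gpuva_ops_free(mgr, ops);
 */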
1640*e6303f32SDanilo Krummrich 
1641*e6303f32SDanilo Krummrich /**
1642*e6303f32SDanilo Krummrich  * drm_gpuva_gem_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM
1643*e6303f32SDanilo Krummrich  * @mgr: the &drm_gpuva_manager representing the GPU VA space
1644*e6303f32SDanilo Krummrich  * @obj: the &drm_gem_object to unmap
1645*e6303f32SDanilo Krummrich  *
1646*e6303f32SDanilo Krummrich  * This function creates a list of operations to perform unmapping for every
1647*e6303f32SDanilo Krummrich  * GPUVA attached to a GEM.
1648*e6303f32SDanilo Krummrich  *
1649*e6303f32SDanilo Krummrich  * The list can be iterated with &drm_gpuva_for_each_op and consists of an
1650*e6303f32SDanilo Krummrich  * arbitrary number of unmap operations, one per &drm_gpuva attached to the GEM.
1651*e6303f32SDanilo Krummrich  *
1652*e6303f32SDanilo Krummrich  * After the caller has finished processing the returned &drm_gpuva_ops, they must
1653*e6303f32SDanilo Krummrich  * be freed with &drm_gpuva_ops_free.
1654*e6303f32SDanilo Krummrich  *
1655*e6303f32SDanilo Krummrich  * It is the caller's responsibility to protect the GEM's GPUVA list against
1656*e6303f32SDanilo Krummrich  * concurrent access using the GEM's dma_resv lock.
1657*e6303f32SDanilo Krummrich  *
1658*e6303f32SDanilo Krummrich  * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
1659*e6303f32SDanilo Krummrich  */
1660*e6303f32SDanilo Krummrich struct drm_gpuva_ops *
1661*e6303f32SDanilo Krummrich drm_gpuva_gem_unmap_ops_create(struct drm_gpuva_manager *mgr,
1662*e6303f32SDanilo Krummrich 			       struct drm_gem_object *obj)
1663*e6303f32SDanilo Krummrich {
1664*e6303f32SDanilo Krummrich 	struct drm_gpuva_ops *ops;
1665*e6303f32SDanilo Krummrich 	struct drm_gpuva_op *op;
1666*e6303f32SDanilo Krummrich 	struct drm_gpuva *va;
1667*e6303f32SDanilo Krummrich 	int ret;
1668*e6303f32SDanilo Krummrich 
1669*e6303f32SDanilo Krummrich 	drm_gem_gpuva_assert_lock_held(obj);
1670*e6303f32SDanilo Krummrich 
1671*e6303f32SDanilo Krummrich 	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
1672*e6303f32SDanilo Krummrich 	if (!ops)
1673*e6303f32SDanilo Krummrich 		return ERR_PTR(-ENOMEM);
1674*e6303f32SDanilo Krummrich 
1675*e6303f32SDanilo Krummrich 	INIT_LIST_HEAD(&ops->list);
1676*e6303f32SDanilo Krummrich 
1677*e6303f32SDanilo Krummrich 	drm_gem_for_each_gpuva(va, obj) {
1678*e6303f32SDanilo Krummrich 		op = gpuva_op_alloc(mgr);
1679*e6303f32SDanilo Krummrich 		if (!op) {
1680*e6303f32SDanilo Krummrich 			ret = -ENOMEM;
1681*e6303f32SDanilo Krummrich 			goto err_free_ops;
1682*e6303f32SDanilo Krummrich 		}
1683*e6303f32SDanilo Krummrich 
1684*e6303f32SDanilo Krummrich 		op->op = DRM_GPUVA_OP_UNMAP;
1685*e6303f32SDanilo Krummrich 		op->unmap.va = va;
1686*e6303f32SDanilo Krummrich 		list_add_tail(&op->entry, &ops->list);
1687*e6303f32SDanilo Krummrich 	}
1688*e6303f32SDanilo Krummrich 
1689*e6303f32SDanilo Krummrich 	return ops;
1690*e6303f32SDanilo Krummrich 
1691*e6303f32SDanilo Krummrich err_free_ops:
1692*e6303f32SDanilo Krummrich 	drm_gpuva_ops_free(mgr, ops);
1693*e6303f32SDanilo Krummrich 	return ERR_PTR(ret);
1694*e6303f32SDanilo Krummrich }
1695*e6303f32SDanilo Krummrich EXPORT_SYMBOL_GPL(drm_gpuva_gem_unmap_ops_create);
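
/*
 * Usage sketch: taking the GEM's dma_resv lock around the list creation, as
 * required above; further processing is driver-specific.
 *
 *	struct drm_gpuva_ops *ops;
 *	int ret;
 *
 *	ret = dma_resv_lock(obj->resv, NULL);
 *	if (ret)
 *		return ret;
 *
 *	ops = drm_gpuva_gem_unmap_ops_create(mgr, obj);
 *	dma_resv_unlock(obj->resv);
 *
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 */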
1696*e6303f32SDanilo Krummrich 
1697*e6303f32SDanilo Krummrich /**
1698*e6303f32SDanilo Krummrich  * drm_gpuva_ops_free() - free the given &drm_gpuva_ops
1699*e6303f32SDanilo Krummrich  * @mgr: the &drm_gpuva_manager the ops were created for
1700*e6303f32SDanilo Krummrich  * @ops: the &drm_gpuva_ops to free
1701*e6303f32SDanilo Krummrich  *
1702*e6303f32SDanilo Krummrich  * Frees the given &drm_gpuva_ops structure including all the ops associated
1703*e6303f32SDanilo Krummrich  * with it.
1704*e6303f32SDanilo Krummrich  */
1705*e6303f32SDanilo Krummrich void
1706*e6303f32SDanilo Krummrich drm_gpuva_ops_free(struct drm_gpuva_manager *mgr,
1707*e6303f32SDanilo Krummrich 		   struct drm_gpuva_ops *ops)
1708*e6303f32SDanilo Krummrich {
1709*e6303f32SDanilo Krummrich 	struct drm_gpuva_op *op, *next;
1710*e6303f32SDanilo Krummrich 
1711*e6303f32SDanilo Krummrich 	drm_gpuva_for_each_op_safe(op, next, ops) {
1712*e6303f32SDanilo Krummrich 		list_del(&op->entry);
1713*e6303f32SDanilo Krummrich 
1714*e6303f32SDanilo Krummrich 		if (op->op == DRM_GPUVA_OP_REMAP) {
1715*e6303f32SDanilo Krummrich 			kfree(op->remap.prev);
1716*e6303f32SDanilo Krummrich 			kfree(op->remap.next);
1717*e6303f32SDanilo Krummrich 			kfree(op->remap.unmap);
1718*e6303f32SDanilo Krummrich 		}
1719*e6303f32SDanilo Krummrich 
1720*e6303f32SDanilo Krummrich 		gpuva_op_free(mgr, op);
1721*e6303f32SDanilo Krummrich 	}
1722*e6303f32SDanilo Krummrich 
1723*e6303f32SDanilo Krummrich 	kfree(ops);
1724*e6303f32SDanilo Krummrich }
1725*e6303f32SDanilo Krummrich EXPORT_SYMBOL_GPL(drm_gpuva_ops_free);
1726