/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __DRM_GPUVA_MGR_H__
#define __DRM_GPUVA_MGR_H__

/*
 * Copyright (c) 2022 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/types.h>

#include <drm/drm_gem.h>

struct drm_gpuva_manager;
struct drm_gpuva_fn_ops;
/**
 * enum drm_gpuva_flags - flags for struct drm_gpuva
 */
enum drm_gpuva_flags {
	/**
	 * @DRM_GPUVA_INVALIDATED:
	 *
	 * Flag indicating that the &drm_gpuva's backing GEM is invalidated.
	 */
	DRM_GPUVA_INVALIDATED = (1 << 0),

	/**
	 * @DRM_GPUVA_SPARSE:
	 *
	 * Flag indicating that the &drm_gpuva is a sparse mapping.
	 */
	DRM_GPUVA_SPARSE = (1 << 1),

	/**
	 * @DRM_GPUVA_USERBITS: user defined bits
	 */
	DRM_GPUVA_USERBITS = (1 << 2),
};
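
/*
 * Drivers may define their own flags on top of DRM_GPUVA_USERBITS. A minimal
 * sketch, with purely hypothetical driver-specific flag names:
 *
 *	enum driver_gpuva_flags {
 *		DRIVER_GPUVA_READONLY = DRM_GPUVA_USERBITS,
 *		DRIVER_GPUVA_CACHED = DRM_GPUVA_USERBITS << 1,
 *	};
 */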

/**
 * struct drm_gpuva - structure to track a GPU VA mapping
 *
 * This structure represents a GPU VA mapping and is associated with a
 * &drm_gpuva_manager.
 *
 * Typically, this structure is embedded in bigger driver structures.
 */
struct drm_gpuva {
	/**
	 * @mgr: the &drm_gpuva_manager this object is associated with
	 */
	struct drm_gpuva_manager *mgr;

	/**
	 * @flags: the &drm_gpuva_flags for this mapping
	 */
	enum drm_gpuva_flags flags;

	/**
	 * @va: structure containing the address and range of the &drm_gpuva
	 */
	struct {
		/**
		 * @addr: the start address
		 */
		u64 addr;

		/**
		 * @range: the range
		 */
		u64 range;
	} va;

	/**
	 * @gem: structure containing the &drm_gem_object and its offset
	 */
	struct {
		/**
		 * @offset: the offset within the &drm_gem_object
		 */
		u64 offset;

		/**
		 * @obj: the mapped &drm_gem_object
		 */
		struct drm_gem_object *obj;

		/**
		 * @entry: the &list_head to attach this object to a &drm_gem_object
		 */
		struct list_head entry;
	} gem;

	/**
	 * @rb: structure containing data to store &drm_gpuvas in a rb-tree
	 */
	struct {
		/**
		 * @node: the rb-tree node
		 */
		struct rb_node node;

		/**
		 * @entry: The &list_head to additionally connect &drm_gpuvas
		 * in the same order they appear in the interval tree. This is
		 * useful to keep iterating &drm_gpuvas from a start node found
		 * through the rb-tree while doing modifications on the rb-tree
		 * itself.
		 */
		struct list_head entry;

		/**
		 * @__subtree_last: needed by the interval tree, holding last-in-subtree
		 */
		u64 __subtree_last;
	} rb;
};

int drm_gpuva_insert(struct drm_gpuva_manager *mgr, struct drm_gpuva *va);
void drm_gpuva_remove(struct drm_gpuva *va);

void drm_gpuva_link(struct drm_gpuva *va);
void drm_gpuva_unlink(struct drm_gpuva *va);

struct drm_gpuva *drm_gpuva_find(struct drm_gpuva_manager *mgr,
				 u64 addr, u64 range);
struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuva_manager *mgr,
				       u64 addr, u64 range);
struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuva_manager *mgr, u64 start);
struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuva_manager *mgr, u64 end);

bool drm_gpuva_interval_empty(struct drm_gpuva_manager *mgr, u64 addr, u64 range);

/**
 * drm_gpuva_init() - initialize a &drm_gpuva
 * @va: the &drm_gpuva to initialize
 * @addr: the start address of the new mapping
 * @range: the range of the new mapping
 * @obj: the &drm_gem_object to map
 * @offset: the offset within the &drm_gem_object
 */
static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,
				  struct drm_gem_object *obj, u64 offset)
{
	va->va.addr = addr;
	va->va.range = range;
	va->gem.obj = obj;
	va->gem.offset = offset;
}
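
/*
 * A sketch of the typical flow for creating a mapping, assuming a
 * hypothetical driver structure embedding a &drm_gpuva: initialize the
 * embedded &drm_gpuva, insert it into the manager's interval tree and link
 * it to its backing GEM object. The GEM's GPU VA list must be protected
 * against concurrent access, typically via the GEM's dma-resv lock.
 *
 *	struct driver_va {
 *		struct drm_gpuva va;
 *	};
 *
 *	int driver_map(struct drm_gpuva_manager *mgr, struct driver_va *dva,
 *		       u64 addr, u64 range,
 *		       struct drm_gem_object *obj, u64 offset)
 *	{
 *		int ret;
 *
 *		drm_gpuva_init(&dva->va, addr, range, obj, offset);
 *
 *		ret = drm_gpuva_insert(mgr, &dva->va);
 *		if (ret)
 *			return ret;
 *
 *		drm_gpuva_link(&dva->va);
 *		return 0;
 *	}
 */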

/**
 * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is
 * invalidated
 * @va: the &drm_gpuva to set the invalidate flag for
 * @invalidate: indicates whether the &drm_gpuva is invalidated
 */
static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate)
{
	if (invalidate)
		va->flags |= DRM_GPUVA_INVALIDATED;
	else
		va->flags &= ~DRM_GPUVA_INVALIDATED;
}

/**
 * drm_gpuva_invalidated() - indicates whether the backing BO of this &drm_gpuva
 * is invalidated
 * @va: the &drm_gpuva to check
 */
static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
{
	return va->flags & DRM_GPUVA_INVALIDATED;
}
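
/*
 * The invalidate flag is typically set for all &drm_gpuvas of a GEM object
 * when it is evicted, and checked again at (re)validation time. A sketch,
 * assuming the drm_gem_for_each_gpuva() helper from <drm/drm_gem.h> and
 * with locking of the GEM's GPU VA list elided:
 *
 *	struct drm_gpuva *va;
 *
 *	drm_gem_for_each_gpuva(va, obj)
 *		drm_gpuva_invalidate(va, true);
 */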

/**
 * struct drm_gpuva_manager - DRM GPU VA Manager
 *
 * The DRM GPU VA Manager keeps track of a GPU's virtual address space by
 * using an interval tree (rb-tree) of &drm_gpuva structures. Typically, this
 * structure is embedded in bigger driver structures.
 *
 * Drivers can pass addresses and ranges in an arbitrary unit, e.g. bytes or
 * pages.
 *
 * There should be one manager instance per GPU virtual address space.
 */
struct drm_gpuva_manager {
	/**
	 * @name: the name of the DRM GPU VA space
	 */
	const char *name;

	/**
	 * @mm_start: start of the VA space
	 */
	u64 mm_start;

	/**
	 * @mm_range: length of the VA space
	 */
	u64 mm_range;

	/**
	 * @rb: structures to track &drm_gpuva entries
	 */
	struct {
		/**
		 * @tree: the rb-tree to track GPU VA mappings
		 */
		struct rb_root_cached tree;

		/**
		 * @list: the &list_head to track GPU VA mappings
		 */
		struct list_head list;
	} rb;

	/**
	 * @kernel_alloc_node:
	 *
	 * &drm_gpuva representing the address space cutout reserved for
	 * the kernel
	 */
	struct drm_gpuva kernel_alloc_node;

	/**
	 * @ops: &drm_gpuva_fn_ops providing the split/merge steps to drivers
	 */
	const struct drm_gpuva_fn_ops *ops;
};

void drm_gpuva_manager_init(struct drm_gpuva_manager *mgr,
			    const char *name,
			    u64 start_offset, u64 range,
			    u64 reserve_offset, u64 reserve_range,
			    const struct drm_gpuva_fn_ops *ops);
void drm_gpuva_manager_destroy(struct drm_gpuva_manager *mgr);
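
/*
 * A minimal initialization sketch, managing a 48-bit wide VA space and
 * reserving a single page at its start as the kernel node; the values and
 * names used here are illustrative only:
 *
 *	drm_gpuva_manager_init(&drv->mgr, "example-vm",
 *			       0, 1ULL << 48,
 *			       0, SZ_4K,
 *			       &driver_gpuva_fn_ops);
 *	...
 *	drm_gpuva_manager_destroy(&drv->mgr);
 */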

static inline struct drm_gpuva *
__drm_gpuva_next(struct drm_gpuva *va)
{
	if (va && !list_is_last(&va->rb.entry, &va->mgr->rb.list))
		return list_next_entry(va, rb.entry);

	return NULL;
}

/**
 * drm_gpuva_for_each_va_range() - iterate over a range of &drm_gpuvas
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @mgr__: &drm_gpuva_manager to walk over
 * @start__: starting offset, the first gpuva will overlap this
 * @end__: ending offset, the last gpuva will start before this (but may
 * overlap)
 *
 * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie
 * between @start__ and @end__. It is implemented similarly to list_for_each(),
 * but is using the &drm_gpuva_manager's internal interval tree to accelerate
 * the search for the starting &drm_gpuva, and hence isn't safe against removal
 * of elements. It assumes that @end__ is within (or is the upper limit of) the
 * &drm_gpuva_manager. This iterator does not skip over the &drm_gpuva_manager's
 * @kernel_alloc_node.
 */
#define drm_gpuva_for_each_va_range(va__, mgr__, start__, end__) \
	for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)); \
	     va__ && (va__->va.addr < (end__)); \
	     va__ = __drm_gpuva_next(va__))
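
/*
 * Example usage, walking all mappings overlapping a requested region; no
 * elements may be removed while iterating:
 *
 *	struct drm_gpuva *va;
 *
 *	drm_gpuva_for_each_va_range(va, mgr, req_addr, req_addr + req_range)
 *		pr_debug("va: addr=0x%llx range=0x%llx\n",
 *			 va->va.addr, va->va.range);
 */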

/**
 * drm_gpuva_for_each_va_range_safe() - safely iterate over a range of
 * &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @mgr__: &drm_gpuva_manager to walk over
 * @start__: starting offset, the first gpuva will overlap this
 * @end__: ending offset, the last gpuva will start before this (but may
 * overlap)
 *
 * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie
 * between @start__ and @end__. It is implemented similarly to
 * list_for_each_safe(), but is using the &drm_gpuva_manager's internal interval
 * tree to accelerate the search for the starting &drm_gpuva, and hence is safe
 * against removal of elements. It assumes that @end__ is within (or is the
 * upper limit of) the &drm_gpuva_manager. This iterator does not skip over the
 * &drm_gpuva_manager's @kernel_alloc_node.
 */
#define drm_gpuva_for_each_va_range_safe(va__, next__, mgr__, start__, end__) \
	for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)), \
	     next__ = __drm_gpuva_next(va__); \
	     va__ && (va__->va.addr < (end__)); \
	     va__ = next__, next__ = __drm_gpuva_next(va__))

/**
 * drm_gpuva_for_each_va() - iterate over all &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @mgr__: &drm_gpuva_manager to walk over
 *
 * This iterator walks over all &drm_gpuva structures associated with the given
 * &drm_gpuva_manager.
 */
#define drm_gpuva_for_each_va(va__, mgr__) \
	list_for_each_entry(va__, &(mgr__)->rb.list, rb.entry)

/**
 * drm_gpuva_for_each_va_safe() - safely iterate over all &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @mgr__: &drm_gpuva_manager to walk over
 *
 * This iterator walks over all &drm_gpuva structures associated with the given
 * &drm_gpuva_manager. It is implemented with list_for_each_entry_safe(), and
 * hence safe against the removal of elements.
 */
#define drm_gpuva_for_each_va_safe(va__, next__, mgr__) \
	list_for_each_entry_safe(va__, next__, &(mgr__)->rb.list, rb.entry)
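
/*
 * The safe variants permit removal during the walk, e.g. when tearing down
 * a whole VA space. A sketch, where driver_va_free() is a hypothetical
 * driver helper and locking is elided; note the manager's own
 * @kernel_alloc_node is skipped:
 *
 *	struct drm_gpuva *va, *next;
 *
 *	drm_gpuva_for_each_va_safe(va, next, mgr) {
 *		if (va == &mgr->kernel_alloc_node)
 *			continue;
 *
 *		drm_gpuva_unlink(va);
 *		drm_gpuva_remove(va);
 *		driver_va_free(va);
 *	}
 */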

/**
 * enum drm_gpuva_op_type - GPU VA operation type
 *
 * Operations to alter the GPU VA mappings tracked by the &drm_gpuva_manager.
 */
enum drm_gpuva_op_type {
	/**
	 * @DRM_GPUVA_OP_MAP: the map op type
	 */
	DRM_GPUVA_OP_MAP,

	/**
	 * @DRM_GPUVA_OP_REMAP: the remap op type
	 */
	DRM_GPUVA_OP_REMAP,

	/**
	 * @DRM_GPUVA_OP_UNMAP: the unmap op type
	 */
	DRM_GPUVA_OP_UNMAP,

	/**
	 * @DRM_GPUVA_OP_PREFETCH: the prefetch op type
	 */
	DRM_GPUVA_OP_PREFETCH,
};

/**
 * struct drm_gpuva_op_map - GPU VA map operation
 *
 * This structure represents a single map operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_map {
	/**
	 * @va: structure containing address and range of a map
	 * operation
	 */
	struct {
		/**
		 * @addr: the base address of the new mapping
		 */
		u64 addr;

		/**
		 * @range: the range of the new mapping
		 */
		u64 range;
	} va;

	/**
	 * @gem: structure containing the &drm_gem_object and its offset
	 */
	struct {
		/**
		 * @offset: the offset within the &drm_gem_object
		 */
		u64 offset;

		/**
		 * @obj: the &drm_gem_object to map
		 */
		struct drm_gem_object *obj;
	} gem;
};

/**
 * struct drm_gpuva_op_unmap - GPU VA unmap operation
 *
 * This structure represents a single unmap operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_unmap {
	/**
	 * @va: the &drm_gpuva to unmap
	 */
	struct drm_gpuva *va;

	/**
	 * @keep:
	 *
	 * Indicates whether this &drm_gpuva is physically contiguous with the
	 * original mapping request.
	 *
	 * Optionally, if &keep is set, drivers may keep the actual page table
	 * mappings for this &drm_gpuva, adding only the missing page table
	 * entries and updating the &drm_gpuva_manager accordingly.
	 */
	bool keep;
};

/**
 * struct drm_gpuva_op_remap - GPU VA remap operation
 *
 * This represents a single remap operation generated by the DRM GPU VA manager.
 *
 * A remap operation is generated when an existing GPU VA mapping is split up
 * by inserting a new GPU VA mapping or by partially unmapping existing
 * mapping(s), hence it consists of at most two map operations and one unmap
 * operation.
 *
 * The @unmap operation takes care of removing the original existing mapping.
 * @prev is used to remap the preceding part, @next the subsequent part.
 *
 * If either a new mapping's start address is aligned with the start address
 * of the old mapping or the new mapping's end address is aligned with the
 * end address of the old mapping, either @prev or @next is NULL.
 *
 * Note, the reason for a dedicated remap operation, rather than arbitrary
 * unmap and map operations, is to give drivers the chance of extracting driver
 * specific data for creating the new mappings from the unmap operation's
 * &drm_gpuva structure which typically is embedded in larger driver specific
 * structures.
 */
struct drm_gpuva_op_remap {
	/**
	 * @prev: the preceding part of a split mapping
	 */
	struct drm_gpuva_op_map *prev;

	/**
	 * @next: the subsequent part of a split mapping
	 */
	struct drm_gpuva_op_map *next;

	/**
	 * @unmap: the unmap operation for the original existing mapping
	 */
	struct drm_gpuva_op_unmap *unmap;
};
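
/*
 * A sketch of deriving the effectively unmapped region from a remap
 * operation, i.e. the part of the original mapping not covered by @prev or
 * @next; drivers may provide a helper like this themselves:
 *
 *	static void driver_remap_range(const struct drm_gpuva_op_remap *op,
 *				       u64 *addr, u64 *range)
 *	{
 *		const struct drm_gpuva *va = op->unmap->va;
 *		u64 start = va->va.addr;
 *		u64 end = va->va.addr + va->va.range;
 *
 *		if (op->prev)
 *			start = op->prev->va.addr + op->prev->va.range;
 *		if (op->next)
 *			end = op->next->va.addr;
 *
 *		*addr = start;
 *		*range = end - start;
 *	}
 */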

/**
 * struct drm_gpuva_op_prefetch - GPU VA prefetch operation
 *
 * This structure represents a single prefetch operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_prefetch {
	/**
	 * @va: the &drm_gpuva to prefetch
	 */
	struct drm_gpuva *va;
};

/**
 * struct drm_gpuva_op - GPU VA operation
 *
 * This structure represents a single generic operation.
 *
 * The particular type of the operation is defined by @op.
 */
struct drm_gpuva_op {
	/**
	 * @entry:
	 *
	 * The &list_head used to distribute instances of this struct within
	 * &drm_gpuva_ops.
	 */
	struct list_head entry;

	/**
	 * @op: the type of the operation
	 */
	enum drm_gpuva_op_type op;

	union {
		/**
		 * @map: the map operation
		 */
		struct drm_gpuva_op_map map;

		/**
		 * @remap: the remap operation
		 */
		struct drm_gpuva_op_remap remap;

		/**
		 * @unmap: the unmap operation
		 */
		struct drm_gpuva_op_unmap unmap;

		/**
		 * @prefetch: the prefetch operation
		 */
		struct drm_gpuva_op_prefetch prefetch;
	};
};
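
/*
 * Drivers typically dispatch on @op when processing an operation; a sketch,
 * where the driver_op_*() handlers are hypothetical:
 *
 *	switch (op->op) {
 *	case DRM_GPUVA_OP_MAP:
 *		ret = driver_op_map(drv, &op->map);
 *		break;
 *	case DRM_GPUVA_OP_REMAP:
 *		ret = driver_op_remap(drv, &op->remap);
 *		break;
 *	case DRM_GPUVA_OP_UNMAP:
 *		ret = driver_op_unmap(drv, &op->unmap);
 *		break;
 *	case DRM_GPUVA_OP_PREFETCH:
 *		ret = driver_op_prefetch(drv, &op->prefetch);
 *		break;
 *	default:
 *		ret = -EINVAL;
 *		break;
 *	}
 */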

/**
 * struct drm_gpuva_ops - wraps a list of &drm_gpuva_op
 */
struct drm_gpuva_ops {
	/**
	 * @list: the &list_head
	 */
	struct list_head list;
};

/**
 * drm_gpuva_for_each_op() - iterator to walk over &drm_gpuva_ops
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations.
 */
#define drm_gpuva_for_each_op(op, ops) list_for_each_entry(op, &(ops)->list, entry)

/**
 * drm_gpuva_for_each_op_safe() - iterator to safely walk over &drm_gpuva_ops
 * @op: &drm_gpuva_op to assign in each iteration step
 * @next: &drm_gpuva_op to store the next step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations. It is
 * implemented with list_for_each_entry_safe(), and hence safe against the
 * removal of elements.
 */
#define drm_gpuva_for_each_op_safe(op, next, ops) \
	list_for_each_entry_safe(op, next, &(ops)->list, entry)

/**
 * drm_gpuva_for_each_op_from_reverse() - iterate backwards from the given point
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations beginning
 * from the given operation in reverse order.
 */
#define drm_gpuva_for_each_op_from_reverse(op, ops) \
	list_for_each_entry_from_reverse(op, &(ops)->list, entry)

/**
 * drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops
 * @ops: the &drm_gpuva_ops to get the first &drm_gpuva_op from
 */
#define drm_gpuva_first_op(ops) \
	list_first_entry(&(ops)->list, struct drm_gpuva_op, entry)

/**
 * drm_gpuva_last_op() - returns the last &drm_gpuva_op from &drm_gpuva_ops
 * @ops: the &drm_gpuva_ops to get the last &drm_gpuva_op from
 */
#define drm_gpuva_last_op(ops) \
	list_last_entry(&(ops)->list, struct drm_gpuva_op, entry)

/**
 * drm_gpuva_prev_op() - previous &drm_gpuva_op in the list
 * @op: the current &drm_gpuva_op
 */
#define drm_gpuva_prev_op(op) list_prev_entry(op, entry)

/**
 * drm_gpuva_next_op() - next &drm_gpuva_op in the list
 * @op: the current &drm_gpuva_op
 */
#define drm_gpuva_next_op(op) list_next_entry(op, entry)

struct drm_gpuva_ops *
drm_gpuva_sm_map_ops_create(struct drm_gpuva_manager *mgr,
			    u64 addr, u64 range,
			    struct drm_gem_object *obj, u64 offset);
struct drm_gpuva_ops *
drm_gpuva_sm_unmap_ops_create(struct drm_gpuva_manager *mgr,
			      u64 addr, u64 range);

struct drm_gpuva_ops *
drm_gpuva_prefetch_ops_create(struct drm_gpuva_manager *mgr,
			      u64 addr, u64 range);

struct drm_gpuva_ops *
drm_gpuva_gem_unmap_ops_create(struct drm_gpuva_manager *mgr,
			       struct drm_gem_object *obj);

void drm_gpuva_ops_free(struct drm_gpuva_manager *mgr,
			struct drm_gpuva_ops *ops);
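
/*
 * A sketch of the "ops list" flow: create the split/merge steps for a map
 * request up front, process them, then free the list. Error handling and
 * locking are elided, and driver_process_op() is hypothetical:
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *
 *	ops = drm_gpuva_sm_map_ops_create(mgr, addr, range, obj, offset);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops)
 *		driver_process_op(drv, op);
 *
 *	drm_gpuva_ops_free(mgr, ops);
 */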

/**
 * drm_gpuva_init_from_op() - initialize a &drm_gpuva from a &drm_gpuva_op_map
 * @va: the &drm_gpuva to initialize
 * @op: the &drm_gpuva_op_map to take the address, range, GEM object and
 * offset from
 */
static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
					  struct drm_gpuva_op_map *op)
{
	drm_gpuva_init(va, op->va.addr, op->va.range,
		       op->gem.obj, op->gem.offset);
}

/**
 * struct drm_gpuva_fn_ops - callbacks for split/merge steps
 *
 * This structure defines the callbacks used by &drm_gpuva_sm_map and
 * &drm_gpuva_sm_unmap to provide the split/merge steps for map and unmap
 * operations to drivers.
 */
struct drm_gpuva_fn_ops {
	/**
	 * @op_alloc: called when the &drm_gpuva_manager allocates
	 * a struct drm_gpuva_op
	 *
	 * Some drivers may want to embed struct drm_gpuva_op into driver
	 * specific structures. By implementing this callback drivers can
	 * allocate memory accordingly.
	 *
	 * This callback is optional.
	 */
	struct drm_gpuva_op *(*op_alloc)(void);

	/**
	 * @op_free: called when the &drm_gpuva_manager frees a
	 * struct drm_gpuva_op
	 *
	 * Some drivers may want to embed struct drm_gpuva_op into driver
	 * specific structures. By implementing this callback drivers can
	 * free the previously allocated memory accordingly.
	 *
	 * This callback is optional.
	 */
	void (*op_free)(struct drm_gpuva_op *op);

	/**
	 * @sm_step_map: called from &drm_gpuva_sm_map to finally insert the
	 * mapping once all previous steps were completed
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
	 *
	 * Can be NULL if &drm_gpuva_sm_map is not used.
	 */
	int (*sm_step_map)(struct drm_gpuva_op *op, void *priv);

	/**
	 * @sm_step_remap: called from &drm_gpuva_sm_map and
	 * &drm_gpuva_sm_unmap to split up an existing mapping
	 *
	 * This callback is called when an existing mapping needs to be split
	 * up. This is the case when either a newly requested mapping overlaps
	 * or is enclosed by an existing mapping, or when a partial unmap of an
	 * existing mapping is requested.
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
	 *
	 * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is
	 * used.
	 */
	int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv);

	/**
	 * @sm_step_unmap: called from &drm_gpuva_sm_map and
	 * &drm_gpuva_sm_unmap to unmap an existing mapping
	 *
	 * This callback is called when an existing mapping needs to be
	 * unmapped. This is the case when either a newly requested mapping
	 * encloses an existing mapping or an unmap of an existing mapping is
	 * requested.
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
	 *
	 * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is
	 * used.
	 */
	int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv);
};
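
/*
 * A sketch of a driver's callback table for the immediate-mode interface,
 * with hypothetical callback implementations; the table is passed to
 * drm_gpuva_manager_init() and invoked from drm_gpuva_sm_map() and
 * drm_gpuva_sm_unmap():
 *
 *	static const struct drm_gpuva_fn_ops driver_gpuva_fn_ops = {
 *		.sm_step_map = driver_gpuva_map_step,
 *		.sm_step_remap = driver_gpuva_remap_step,
 *		.sm_step_unmap = driver_gpuva_unmap_step,
 *	};
 */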

int drm_gpuva_sm_map(struct drm_gpuva_manager *mgr, void *priv,
		     u64 addr, u64 range,
		     struct drm_gem_object *obj, u64 offset);

int drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr, void *priv,
		       u64 addr, u64 range);

void drm_gpuva_map(struct drm_gpuva_manager *mgr,
		   struct drm_gpuva *va,
		   struct drm_gpuva_op_map *op);

void drm_gpuva_remap(struct drm_gpuva *prev,
		     struct drm_gpuva *next,
		     struct drm_gpuva_op_remap *op);

void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);
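
/*
 * Within the &drm_gpuva_fn_ops callbacks, drm_gpuva_map(), drm_gpuva_remap()
 * and drm_gpuva_unmap() update the manager's tracking to match the step just
 * processed. A sketch of an unmap step, with page table handling reduced to
 * a hypothetical driver_unmap_pages():
 *
 *	static int driver_gpuva_unmap_step(struct drm_gpuva_op *op, void *priv)
 *	{
 *		struct drm_gpuva *va = op->unmap.va;
 *
 *		driver_unmap_pages(priv, va->va.addr, va->va.range);
 *
 *		drm_gpuva_unmap(&op->unmap);
 *		drm_gpuva_unlink(va);
 *		return 0;
 *	}
 */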

#endif /* __DRM_GPUVA_MGR_H__ */