/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef __I915_VMA_RESOURCE_H__
#define __I915_VMA_RESOURCE_H__

#include <linux/dma-fence.h>
#include <linux/refcount.h>

#include "i915_gem.h"
#include "i915_scatterlist.h"
#include "i915_sw_fence.h"
#include "intel_runtime_pm.h"

struct intel_memory_region;

struct i915_page_sizes {
	/**
	 * The sg mask of the pages sg_table, i.e. the mask of
	 * the lengths for each sg entry.
	 */
	unsigned int phys;

	/**
	 * The gtt page sizes we are allowed to use given the
	 * sg mask and the supported page sizes. This will
	 * express the smallest unit we can use for the whole
	 * object, as well as the larger sizes we may be able
	 * to use opportunistically.
	 */
	unsigned int sg;
};
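
/*
 * Illustrative sketch only, not part of this header: the @phys mask
 * above can be derived by ORing together the DMA lengths of all
 * entries of a mapped sg_table (here assumed to be named sgt); i915
 * carries helpers along these lines in i915_scatterlist.h.
 *
 *	unsigned int phys = 0;
 *	struct scatterlist *sg;
 *
 *	for (sg = sgt->sgl; sg; sg = sg_next(sg))
 *		phys |= sg_dma_len(sg);
 *
 * The smallest set bit in the result bounds the smallest GTT page
 * size usable for the whole object.
 */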

/**
 * struct i915_vma_bindinfo - Information needed for async bind
 * only but that can be dropped after the bind has taken place.
 * Consider making this a separate argument to the bind_vma
 * op, coalescing with other arguments like vm, stash, cache_level
 * and flags.
 * @pages: The pages sg-table.
 * @page_sizes: Page sizes of the pages.
 * @pages_rsgt: Refcounted sg-table when delayed object destruction
 * is supported. May be NULL.
 * @readonly: Whether the vma should be bound read-only.
 * @lmem: Whether the vma points to lmem.
 */
struct i915_vma_bindinfo {
	struct sg_table *pages;
	struct i915_page_sizes page_sizes;
	struct i915_refct_sgt *pages_rsgt;
	bool readonly:1;
	bool lmem:1;
};

/**
 * struct i915_vma_resource - Snapshotted unbind information.
 * @unbind_fence: Fence to mark unbinding complete. Note that this fence
 * is not considered published until unbind is scheduled, and as such it
 * is illegal to access this fence before the unbind is scheduled, other
 * than for refcounting.
 * @lock: The @unbind_fence lock.
 * @hold_count: Number of holders blocking the fence from finishing.
 * The vma itself is keeping a hold, which is released when unbind
 * is scheduled.
 * @work: Work struct for deferred unbind work.
 * @chain: The struct i915_sw_fence used to await dependencies.
 * @rb: Rb node for the vm's pending unbind interval tree.
 * @__subtree_last: Interval tree private member.
 * @wakeref: Runtime pm wakeref taken when the unbind is scheduled.
 * @vm: Non-refcounted pointer to the vm. This is for internal use only
 * and this member is cleared after vma resource unbind.
 * @mr: The memory region of the object pointed to by the vma.
 * @ops: Pointer to the backend i915_vma_ops.
 * @private: Bind backend private info.
 * @start: Offset into the address space of bind range start. Note that
 * this is after any padding that might have been allocated.
 * @node_size: Size of the allocated range manager node with padding
 * subtracted.
 * @vma_size: Bind size.
 * @guard: The size of the guard area preceding and trailing the bind.
 * @page_sizes_gtt: Resulting page sizes from the bind operation.
 * @bound_flags: Flags indicating binding status.
 * @allocated: Backend private data. TODO: Should move into @private.
 * @immediate_unbind: Unbind can be done immediately and doesn't need to be
 * deferred to a work item awaiting unsignaled fences. This is a hack.
 * (dma_fence_work uses a fence flag for this, but this seems slightly
 * cleaner).
 * @needs_wakeref: Whether a wakeref is needed during unbind. Since we can't
 * take a wakeref in the dma-fence signalling critical path, it needs to be
 * taken when the unbind is scheduled.
 * @skip_pte_rewrite: During ggtt suspend and vm takedown, pte rewriting
 * needs to be skipped for unbind.
 * @tlb: Pointer to obj->mm.tlb if the unbind is async, otherwise NULL.
 *
 * The lifetime of a struct i915_vma_resource is from a binding request
 * until the possibly asynchronous unbind has completed.
 */
struct i915_vma_resource {
	struct dma_fence unbind_fence;
	/* See above for description of the lock. */
	spinlock_t lock;
	refcount_t hold_count;
	struct work_struct work;
	struct i915_sw_fence chain;
	struct rb_node rb;
	u64 __subtree_last;
	struct i915_address_space *vm;
	intel_wakeref_t wakeref;

	/**
	 * @bi: Information needed for async bind only but that can be dropped
	 * after the bind has taken place.
	 *
	 * Consider making this a separate argument to the bind_vma op,
	 * coalescing with other arguments like vm, stash, cache_level and
	 * flags.
	 */
	struct i915_vma_bindinfo bi;

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	struct intel_memory_region *mr;
#endif
	const struct i915_vma_ops *ops;
	void *private;
	u64 start;
	u64 node_size;
	u64 vma_size;
	u32 guard;
	u32 page_sizes_gtt;

	u32 bound_flags;
	bool allocated:1;
	bool immediate_unbind:1;
	bool needs_wakeref:1;
	bool skip_pte_rewrite:1;

	u32 *tlb;
};

bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
			    bool *lockdep_cookie);

void i915_vma_resource_unhold(struct i915_vma_resource *vma_res,
			      bool lockdep_cookie);
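
/*
 * Usage sketch only, with the surrounding context assumed: a caller
 * that needs to keep the unbind fence from signalling while accessing
 * the bound range typically brackets the access like this.
 *
 *	bool lockdep_cookie;
 *
 *	if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) {
 *		... access the bound range ...
 *		i915_vma_resource_unhold(vma_res, lockdep_cookie);
 *	}
 */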

struct i915_vma_resource *i915_vma_resource_alloc(void);

void i915_vma_resource_free(struct i915_vma_resource *vma_res);
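
/*
 * Minimal sketch, assuming i915_vma_resource_alloc() returns an
 * ERR_PTR() on allocation failure: allocate up front, outside any
 * lock, and free with i915_vma_resource_free() if the resource never
 * ends up initialized and bound.
 *
 *	struct i915_vma_resource *vma_res;
 *
 *	vma_res = i915_vma_resource_alloc();
 *	if (IS_ERR(vma_res))
 *		return PTR_ERR(vma_res);
 *	...
 *	i915_vma_resource_free(vma_res);
 */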

struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res,
					   u32 *tlb);

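/*
 * Sketch only, assuming the caller owns a reference on the returned
 * fence: schedule the unbind and wait for it to complete. A NULL @tlb
 * here stands for the non-async case per the struct documentation
 * above; both names and ownership rules are assumptions.
 *
 *	struct dma_fence *fence;
 *
 *	fence = i915_vma_resource_unbind(vma_res, NULL);
 *	dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 */
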
void __i915_vma_resource_init(struct i915_vma_resource *vma_res);

/**
 * i915_vma_resource_get - Take a reference on a vma resource
 * @vma_res: The vma resource on which to take a reference.
 *
 * Return: The @vma_res pointer
 */
static inline struct i915_vma_resource
*i915_vma_resource_get(struct i915_vma_resource *vma_res)
{
	dma_fence_get(&vma_res->unbind_fence);
	return vma_res;
}

/**
 * i915_vma_resource_put - Release a reference to a struct i915_vma_resource
 * @vma_res: The resource
 */
static inline void i915_vma_resource_put(struct i915_vma_resource *vma_res)
{
	dma_fence_put(&vma_res->unbind_fence);
}
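
/*
 * Illustrative only: a reference obtained with i915_vma_resource_get()
 * keeps the vma resource (and its embedded unbind fence) from being
 * freed, but does not block the unbind itself; use
 * i915_vma_resource_hold() for that.
 *
 *	vma_res = i915_vma_resource_get(vma_res);
 *	... inspect or wait on vma_res->unbind_fence ...
 *	i915_vma_resource_put(vma_res);
 */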

/**
 * i915_vma_resource_init - Initialize a vma resource.
 * @vma_res: The vma resource to initialize
 * @vm: Pointer to the vm.
 * @pages: The pages sg-table.
 * @page_sizes: Page sizes of the pages.
 * @pages_rsgt: Pointer to a struct i915_refct_sgt of an object with
 * delayed destruction. May be NULL.
 * @readonly: Whether the vma should be bound read-only.
 * @lmem: Whether the vma points to lmem.
 * @mr: The memory region of the object the vma points to.
 * @ops: The backend ops.
 * @private: Bind backend private info.
 * @start: Offset into the address space of bind range start after padding.
 * @node_size: Size of the allocated range manager node minus padding.
 * @size: Bind size.
 * @guard: The size of the guard area preceding and trailing the bind.
 *
 * Initializes a vma resource allocated using i915_vma_resource_alloc().
 * The reason for having separate allocate and initialize functions is that
 * initialization may need to be performed from under a lock where
 * allocation is not allowed.
 */
static inline void i915_vma_resource_init(struct i915_vma_resource *vma_res,
					  struct i915_address_space *vm,
					  struct sg_table *pages,
					  const struct i915_page_sizes *page_sizes,
					  struct i915_refct_sgt *pages_rsgt,
					  bool readonly,
					  bool lmem,
					  struct intel_memory_region *mr,
					  const struct i915_vma_ops *ops,
					  void *private,
					  u64 start,
					  u64 node_size,
					  u64 size,
					  u32 guard)
{
	__i915_vma_resource_init(vma_res);
	vma_res->vm = vm;
	vma_res->bi.pages = pages;
	vma_res->bi.page_sizes = *page_sizes;
	if (pages_rsgt)
		vma_res->bi.pages_rsgt = i915_refct_sgt_get(pages_rsgt);
	vma_res->bi.readonly = readonly;
	vma_res->bi.lmem = lmem;
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	vma_res->mr = mr;
#endif
	vma_res->ops = ops;
	vma_res->private = private;
	vma_res->start = start;
	vma_res->node_size = node_size;
	vma_res->vma_size = size;
	vma_res->guard = guard;
}
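
/*
 * Minimal sketch with all values assumed from a hypothetical caller:
 * a writable, non-lmem binding without padding or guard pages, using
 * the range manager node just allocated for the bind.
 *
 *	i915_vma_resource_init(vma_res, vm, obj->mm.pages,
 *			       &obj->mm.page_sizes, NULL, false, false,
 *			       NULL, &vm->vma_ops, NULL, node.start,
 *			       node.size, node.size, 0);
 */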

static inline void i915_vma_resource_fini(struct i915_vma_resource *vma_res)
{
	GEM_BUG_ON(refcount_read(&vma_res->hold_count) != 1);
	if (vma_res->bi.pages_rsgt)
		i915_refct_sgt_put(vma_res->bi.pages_rsgt);
	i915_sw_fence_fini(&vma_res->chain);
}

int i915_vma_resource_bind_dep_sync(struct i915_address_space *vm,
				    u64 first,
				    u64 last,
				    bool intr);

int i915_vma_resource_bind_dep_await(struct i915_address_space *vm,
				     struct i915_sw_fence *sw_fence,
				     u64 first,
				     u64 last,
				     bool intr,
				     gfp_t gfp);
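
/*
 * Sketch only, names assumed: rather than blocking in
 * i915_vma_resource_bind_dep_sync(), an async bind can make its
 * i915_sw_fence await all pending unbinds in the target range.
 *
 *	err = i915_vma_resource_bind_dep_await(vm, &work->chain,
 *					       first, last, true,
 *					       GFP_NOWAIT);
 *	if (err)
 *		return err;
 */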

void i915_vma_resource_bind_dep_sync_all(struct i915_address_space *vm);

void i915_vma_resource_module_exit(void);

int i915_vma_resource_module_init(void);

#endif