xref: /openbmc/linux/include/drm/ttm/ttm_device.h (revision 78c5335b)
1 /*
2  * Copyright 2020 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Christian König
23  */
24 
25 #ifndef _TTM_DEVICE_H_
26 #define _TTM_DEVICE_H_
27 
28 #include <linux/types.h>
29 #include <linux/workqueue.h>
30 #include <drm/ttm/ttm_resource.h>
31 #include <drm/ttm/ttm_pool.h>
32 
33 #define TTM_NUM_MEM_TYPES 8
34 
35 struct ttm_device;
36 struct ttm_placement;
37 struct ttm_buffer_object;
38 struct ttm_operation_ctx;
39 
40 /**
41  * struct ttm_global - Buffer object driver global data.
42  */
/**
 * struct ttm_global - Buffer object driver global data.
 *
 * There is exactly one instance of this structure, &ttm_glob, shared by
 * all TTM devices (see the @device_list member below).
 */
extern struct ttm_global {

	/**
	 * @dummy_read_page: Pointer to a dummy page used for mapping requests
	 * of unpopulated pages. Constant after init.
	 */
	struct page *dummy_read_page;

	/**
	 * @device_list: List of buffer object devices. Protected by
	 * ttm_global_mutex.
	 */
	struct list_head device_list;

	/**
	 * @bo_count: Number of buffer objects allocated by devices.
	 */
	atomic_t bo_count;
} ttm_glob;
62 
/*
 * struct ttm_device_funcs - Driver callback table for a TTM device.
 *
 * Drivers fill in this vtable and pass it to ttm_device_init(). Mandatory
 * vs. optional hooks are determined by the TTM core (not visible in this
 * header) -- confirm against ttm_bo.c / ttm_tt.c before omitting one.
 */
struct ttm_device_funcs {
	/**
	 * ttm_tt_create
	 *
	 * @bo: The buffer object to create the ttm for.
	 * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
	 *
	 * Create a struct ttm_tt to back data with system memory pages.
	 * No pages are actually allocated.
	 * Returns:
	 * NULL: Out of memory.
	 */
	struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
					uint32_t page_flags);

	/**
	 * ttm_tt_populate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Allocate all backing pages.
	 * Returns:
	 * -ENOMEM: Out of memory.
	 */
	int (*ttm_tt_populate)(struct ttm_device *bdev,
			       struct ttm_tt *ttm,
			       struct ttm_operation_ctx *ctx);

	/**
	 * ttm_tt_unpopulate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Free all backing pages.
	 */
	void (*ttm_tt_unpopulate)(struct ttm_device *bdev,
				  struct ttm_tt *ttm);

	/**
	 * ttm_tt_destroy
	 *
	 * @bdev: Pointer to a ttm device
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Destroy the backend. This is called back from ttm_tt_destroy, so
	 * don't call ttm_tt_destroy from the callback or you get an
	 * infinite loop.
	 */
	void (*ttm_tt_destroy)(struct ttm_device *bdev, struct ttm_tt *ttm);

	/**
	 * struct ttm_bo_driver member eviction_valuable
	 *
	 * @bo: the buffer object to be evicted
	 * @place: placement we need room for
	 *
	 * Check with the driver if it is valuable to evict a BO to make room
	 * for a certain placement.
	 */
	bool (*eviction_valuable)(struct ttm_buffer_object *bo,
				  const struct ttm_place *place);
	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags
	 * This should not cause multihop evictions, and the core will warn
	 * if one is proposed.
	 */
	void (*evict_flags)(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement);

	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 * @ctx: context for this move with parameters
	 * @new_mem: the new memory region receiving the buffer
	 * @hop: placement for driver directed intermediate hop
	 *
	 * Move a buffer between two memory regions.
	 * Returns errno -EMULTIHOP if driver requests a hop.
	 */
	int (*move)(struct ttm_buffer_object *bo, bool evict,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_resource *new_mem,
		    struct ttm_place *hop);

	/**
	 * Hook to notify driver about a resource delete.
	 */
	void (*delete_mem_notify)(struct ttm_buffer_object *bo);

	/**
	 * Notify the driver that we're about to swap out this bo.
	 */
	void (*swap_notify)(struct ttm_buffer_object *bo);

	/**
	 * Driver callback on when mapping io memory (for bo_move_memcpy
	 * for instance). TTM will take care to call io_mem_free whenever
	 * the mapping is not used anymore. io_mem_reserve & io_mem_free
	 * are balanced.
	 */
	int (*io_mem_reserve)(struct ttm_device *bdev,
			      struct ttm_resource *mem);
	void (*io_mem_free)(struct ttm_device *bdev,
			    struct ttm_resource *mem);

	/**
	 * Return the pfn for a given page_offset inside the BO.
	 *
	 * @bo: the BO to look up the pfn for
	 * @page_offset: the offset to look up
	 */
	unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
				    unsigned long page_offset);

	/**
	 * Read/write memory buffers for ptrace access
	 *
	 * @bo: the BO to access
	 * @offset: the offset from the start of the BO
	 * @buf: pointer to source/destination buffer
	 * @len: number of bytes to copy
	 * @write: whether to read (0) from or write (non-0) to BO
	 *
	 * If successful, this function should return the number of
	 * bytes copied, -EIO otherwise. If the number of bytes
	 * returned is < len, the function may be called again with
	 * the remainder of the buffer to copy.
	 */
	int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
			     void *buf, int len, int write);

	/**
	 * struct ttm_bo_driver member del_from_lru_notify
	 *
	 * @bo: the buffer object deleted from lru
	 *
	 * notify driver that a BO was deleted from LRU.
	 */
	void (*del_from_lru_notify)(struct ttm_buffer_object *bo);

	/**
	 * Notify the driver that we're about to release a BO
	 *
	 * @bo: BO that is about to be released
	 *
	 * Gives the driver a chance to do any cleanup, including
	 * adding fences that may force a delayed delete
	 */
	void (*release_notify)(struct ttm_buffer_object *bo);
};
222 
223 /**
224  * struct ttm_device - Buffer object driver device-specific data.
225  */
/**
 * struct ttm_device - Buffer object driver device-specific data.
 *
 * One instance per driver device, initialized with ttm_device_init() and
 * torn down with ttm_device_fini().
 */
struct ttm_device {
	/**
	 * @device_list: Our entry in the global device list.
	 * Constant after bo device init
	 */
	struct list_head device_list;

	/**
	 * @funcs: Function table for the device.
	 * Constant after bo device init
	 */
	struct ttm_device_funcs *funcs;

	/**
	 * @sysman: Resource manager for the system domain.
	 * Access via ttm_manager_type.
	 */
	struct ttm_resource_manager sysman;

	/**
	 * @man_drv: An array of resource_managers, one per resource type.
	 * Indexed by memory type (< TTM_NUM_MEM_TYPES); entries are
	 * installed with ttm_set_driver_manager().
	 */
	struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES];

	/**
	 * @vma_manager: Address space manager for finding BOs to mmap.
	 */
	struct drm_vma_offset_manager *vma_manager;

	/**
	 * @pool: page pool for the device.
	 */
	struct ttm_pool pool;

	/**
	 * @lru_lock: Protection for the per manager LRU and ddestroy lists.
	 */
	spinlock_t lru_lock;

	/**
	 * @ddestroy: Destroyed but not yet cleaned up buffer objects.
	 */
	struct list_head ddestroy;

	/**
	 * @pinned: Buffer objects which are pinned and so not on any LRU list.
	 */
	struct list_head pinned;

	/**
	 * @dev_mapping: A pointer to the struct address_space for invalidating
	 * CPU mappings on buffer move. Protected by load/unload sync.
	 */
	struct address_space *dev_mapping;

	/**
	 * @wq: Work queue structure for the delayed delete workqueue.
	 */
	struct delayed_work wq;
};
286 
/*
 * Reclaim system memory by swapping out buffer objects: the global variant
 * considers all registered devices, the device variant only @bdev.
 * NOTE(review): exact return-value semantics (amount reclaimed vs. errno)
 * are defined by the implementation in ttm_device.c -- confirm there.
 */
int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags);
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
		       gfp_t gfp_flags);
290 
291 static inline struct ttm_resource_manager *
292 ttm_manager_type(struct ttm_device *bdev, int mem_type)
293 {
294 	BUILD_BUG_ON(__builtin_constant_p(mem_type)
295 		     && mem_type >= TTM_NUM_MEM_TYPES);
296 	return bdev->man_drv[mem_type];
297 }
298 
299 static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type,
300 					  struct ttm_resource_manager *manager)
301 {
302 	BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
303 	bdev->man_drv[type] = manager;
304 }
305 
/*
 * Device lifecycle: ttm_device_init() sets up a &struct ttm_device (adding it
 * to the global device list), ttm_device_fini() tears it down.
 * NOTE(review): init return value is presumably 0 or negative errno, per
 * kernel convention -- confirm against the implementation in ttm_device.c.
 */
int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
		    struct device *dev, struct address_space *mapping,
		    struct drm_vma_offset_manager *vma_manager,
		    bool use_dma_alloc, bool use_dma32);
void ttm_device_fini(struct ttm_device *bdev);
void ttm_device_clear_dma_mappings(struct ttm_device *bdev);
312 
313 #endif
314