/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

/**
 * ttm_tt_create - allocate a struct ttm_tt for the given BO
 * @bo: buffer object to back with a ttm_tt
 * @zero_alloc: true if allocated pages need to be zeroed
 *
 * Must be called with the BO's reservation held. A no-op if the BO
 * already has a ttm_tt. Returns -ENOMEM if the driver fails to allocate
 * one, or -EINVAL for an illegal buffer object type.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	if (bdev->no_retry)
		page_flags |= TTM_PAGE_FLAG_NO_RETRY;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_PAGE_FLAG_SG;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	return 0;
}

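/*
 * Example (illustrative sketch, not part of this file): a minimal
 * driver implementation of the bdev->driver->ttm_tt_create() hook used
 * above. The name my_ttm_tt_create and the ttm_cached choice are
 * hypothetical; real drivers embed struct ttm_tt in a driver-private
 * structure and pick a caching mode to match their use case.
 *
 * static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
 *					  uint32_t page_flags)
 * {
 *	struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *
 *	if (!tt)
 *		return NULL;
 *
 *	if (ttm_tt_init(tt, bo, page_flags, ttm_cached)) {
 *		kfree(tt);
 *		return NULL;
 *	}
 *	return tt;
 * }
 */
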
/*
 * Allocate storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void *),
				    GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;
	return 0;
}

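/*
 * Allocate one combined array: the first num_pages entries hold the
 * struct page pointers and the dma_address array lives directly behind
 * them, so a single kvfree() of ttm->pages releases both.
 */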
static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
					sizeof(*ttm->ttm.pages) +
					sizeof(*ttm->dma_address),
					GFP_KERNEL | __GFP_ZERO);
	if (!ttm->ttm.pages)
		return -ENOMEM;
	ttm->dma_address = (void *)(ttm->ttm.pages + ttm->ttm.num_pages);
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->dma_address)
		return -ENOMEM;
	return 0;
}

void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	ttm_tt_unpopulate(bdev, ttm);

	if (ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
}
EXPORT_SYMBOL(ttm_tt_destroy_common);

void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	bdev->driver->ttm_tt_destroy(bdev, ttm);
}

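/*
 * Example (illustrative sketch): the matching driver ttm_tt_destroy()
 * hook for the hypothetical create hook shown earlier. Drivers perform
 * the shared teardown via ttm_tt_destroy_common() before freeing their
 * own structure.
 *
 * static void my_ttm_tt_destroy(struct ttm_bo_device *bdev,
 *				 struct ttm_tt *tt)
 * {
 *	ttm_tt_destroy_common(bdev, tt);
 *	ttm_tt_fini(tt);
 *	kfree(tt);
 * }
 */
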
static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags,
			       enum ttm_caching caching)
{
	ttm->num_pages = bo->num_pages;
	ttm->page_flags = page_flags;
	ttm_tt_set_unpopulated(ttm);
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
	ttm->caching = caching;
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags, enum ttm_caching caching)
{
	ttm_tt_init_fields(ttm, bo, page_flags, caching);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	kvfree(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		    uint32_t page_flags, enum ttm_caching caching)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm_tt_init_fields(ttm, bo, page_flags, caching);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

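/*
 * Example (illustrative sketch): a driver that needs per-page DMA
 * addresses embeds struct ttm_dma_tt instead of struct ttm_tt. The
 * type and hook names are hypothetical.
 *
 * struct my_dma_tt {
 *	struct ttm_dma_tt dma;
 * };
 *
 * static struct ttm_tt *my_dma_tt_create(struct ttm_buffer_object *bo,
 *					  uint32_t page_flags)
 * {
 *	struct my_dma_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *
 *	if (!tt)
 *		return NULL;
 *
 *	if (ttm_dma_tt_init(&tt->dma, bo, page_flags, ttm_cached)) {
 *		kfree(tt);
 *		return NULL;
 *	}
 *	return &tt->dma.ttm;
 * }
 */
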
int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		   uint32_t page_flags, enum ttm_caching caching)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags, caching);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	/* SG ttms get their pages from an sg table, so only the
	 * dma_address array is needed.
	 */
	if (page_flags & TTM_PAGE_FLAG_SG)
		ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

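/*
 * Free whichever directory layout ttm_dma_tt_init() or ttm_sg_tt_init()
 * allocated: the combined pages + dma_address array is released through
 * ttm->pages, while the SG-only case has a standalone dma_address array.
 */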
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm_dma->dma_address);
	ttm->pages = NULL;
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

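/*
 * Copy the BO's contents back from its shmem swap file into the already
 * repopulated ttm pages, then drop the swap storage.
 */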
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	gfp_t gfp_mask;
	int i, ret;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;
	gfp_mask = mapping_gfp_mask(swap_space);
	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
		gfp_mask |= __GFP_RETRY_MAYFAIL;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page_gfp(swap_space, i,
							gfp_mask);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;

out_err:
	return ret;
}

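/*
 * Write the ttm's pages out to a freshly allocated shmem file, then
 * unpopulate the ttm and keep the file as swap storage for a later
 * ttm_tt_swapin().
 */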
int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	gfp_t gfp_mask;
	int i, ret;

	/* Cast before shifting so the size can't overflow on 32-bit. */
	swap_storage = shmem_file_setup("ttm swap",
					(loff_t)ttm->num_pages << PAGE_SHIFT,
					0);
	if (IS_ERR(swap_storage)) {
		pr_err("Failed allocating swap storage\n");
		return PTR_ERR(swap_storage);
	}

	swap_space = swap_storage->f_mapping;
	gfp_mask = mapping_gfp_mask(swap_space);
	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
		gfp_mask |= __GFP_RETRY_MAYFAIL;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;

	return 0;

out_err:
	fput(swap_storage);

	return ret;
}

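/*
 * Tag each backing page with the device's address space after a
 * successful populate; SG ttms don't own their pages and are skipped.
 */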
static void ttm_tt_add_mapping(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	pgoff_t i;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = bdev->dev_mapping;
}

int ttm_tt_populate(struct ttm_bo_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (bdev->driver->ttm_tt_populate)
		ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_populate(ttm, ctx);
	if (!ret)
		ttm_tt_add_mapping(bdev, ttm);
	return ret;
}
EXPORT_SYMBOL(ttm_tt_populate);

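/*
 * Example (illustrative sketch): populating a tt before binding it,
 * with the BO reserved; the context values are one plausible choice.
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = true,
 *		.no_wait_gpu = false,
 *	};
 *	int ret;
 *
 *	ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
 *	if (ret)
 *		return ret;
 */
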
/* Clear the page mapping set by ttm_tt_add_mapping() and reset the
 * page index before the pages are released.
 */
static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;
	struct page **page = ttm->pages;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		(*page)->mapping = NULL;
		(*page++)->index = 0;
	}
}

void ttm_tt_unpopulate(struct ttm_bo_device *bdev,
		       struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	ttm_tt_clear_mapping(ttm);
	if (bdev->driver->ttm_tt_unpopulate)
		bdev->driver->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_unpopulate(ttm);
}
369