xref: /openbmc/linux/drivers/gpu/drm/ttm/ttm_tt.c (revision 2208f39c)
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>

/**
 * ttm_tt_create - allocate and attach a struct ttm_tt for the given BO.
 * @bo: buffer object that needs a page backing.
 * @zero_alloc: true if allocated pages need to be zeroed.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_PAGE_FLAG_SG;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	return 0;
}

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
			GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;
	return 0;
}

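/*
 * Allocate a single array holding both the page pointers and the DMA
 * addresses, with the DMA addresses stored directly behind the page
 * pointers.
 */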
static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages,
				    sizeof(*ttm->pages) +
				    sizeof(*ttm->dma_address),
				    GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;

	ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
	return 0;
}

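/*
 * SG-backed ttms get their pages from an sg table, so only the DMA
 * address array needs to be allocated here.
 */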
static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->dma_address = kvmalloc_array(ttm->num_pages,
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->dma_address)
		return -ENOMEM;
	return 0;
}

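/*
 * Common cleanup for all ttm_tt destructors: unpopulate the ttm and
 * drop any remaining reference on the shmem swap storage file.
 */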
void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	ttm_tt_unpopulate(bdev, ttm);

	if (ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
}
EXPORT_SYMBOL(ttm_tt_destroy_common);

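/* Destroy the ttm through the driver provided destructor. */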
void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	bdev->driver->ttm_tt_destroy(bdev, ttm);
}

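/* Initialize the fields shared by all ttm_tt flavours. */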
static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags,
			       enum ttm_caching caching)
{
	ttm->num_pages = bo->num_pages;
	ttm->page_flags = page_flags;
	ttm->dma_address = NULL;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
	ttm->caching = caching;
}

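/*
 * Initialize a ttm_tt that is backed by a plain page pointer array,
 * without per-page DMA addresses.
 */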
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags, enum ttm_caching caching)
{
	ttm_tt_init_fields(ttm, bo, page_flags, caching);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

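/*
 * Free the page directory. For DMA ttms the page pointers and DMA
 * addresses share one allocation, so only one of them is freed.
 */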
void ttm_tt_fini(struct ttm_tt *ttm)
{
	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm->dma_address);
	ttm->pages = NULL;
	ttm->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

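/*
 * Initialize a ttm_tt that tracks both page pointers and their DMA
 * addresses.
 */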
int ttm_dma_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		    uint32_t page_flags, enum ttm_caching caching)
{
	ttm_tt_init_fields(ttm, bo, page_flags, caching);

	if (ttm_dma_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

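/*
 * Initialize a ttm_tt for an SG buffer object. Page pointers are only
 * allocated when the pages are not provided through an sg table.
 */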
int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		   uint32_t page_flags, enum ttm_caching caching)
{
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags, caching);

	if (page_flags & TTM_PAGE_FLAG_SG)
		ret = ttm_sg_tt_alloc_page_directory(ttm);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

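/*
 * Copy the contents of the shmem swap storage back into the ttm pages
 * and drop the reference on the swap storage file.
 */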
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	gfp_t gfp_mask;
	int i, ret;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;
	gfp_mask = mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page_gfp(swap_space, i,
							gfp_mask);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;

out_err:
	return ret;
}

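/*
 * Copy the ttm pages into a freshly created shmem file, unpopulate the
 * ttm and remember the file as swap storage for a later swapin.
 */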
int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	gfp_t gfp_mask;
	int i, ret;

	/* The cast guards against 32-bit overflow of the size for large BOs. */
	swap_storage = shmem_file_setup("ttm swap",
					(loff_t)ttm->num_pages << PAGE_SHIFT,
					0);
	if (IS_ERR(swap_storage)) {
		pr_err("Failed allocating swap storage\n");
		return PTR_ERR(swap_storage);
	}

	swap_space = swap_storage->f_mapping;
	gfp_mask = mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;

	return 0;

out_err:
	fput(swap_storage);

	return ret;
}

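/*
 * Associate the backing pages with the device address space; SG ttms
 * don't own their pages and are left alone.
 */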
static void ttm_tt_add_mapping(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	pgoff_t i;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = bdev->dev_mapping;
}

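/*
 * Allocate backing pages for the ttm, either through the driver
 * callback or the default pool allocator, and swap the old contents
 * back in if the ttm was previously swapped out.
 */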
int ttm_tt_populate(struct ttm_bo_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (bdev->driver->ttm_tt_populate)
		ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		return ret;

	ttm_tt_add_mapping(bdev, ttm);
	ttm->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED;
	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_tt_unpopulate(bdev, ttm);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(ttm_tt_populate);

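/* Undo ttm_tt_add_mapping() before the pages are handed back. */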
static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;
	struct page **page = ttm->pages;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		(*page)->mapping = NULL;
		(*page++)->index = 0;
	}
}

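/*
 * Release the backing pages, either through the driver callback or the
 * default pool allocator.
 */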
void ttm_tt_unpopulate(struct ttm_bo_device *bdev,
		       struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	ttm_tt_clear_mapping(ttm);
	if (bdev->driver->ttm_tt_unpopulate)
		bdev->driver->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_free(&bdev->pool, ttm);
	ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
}