/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/cc_platform.h>
#include <linux/sched.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include <drm/drm_util.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>

#include "ttm_module.h"

static unsigned long ttm_pages_limit;

MODULE_PARM_DESC(pages_limit, "Limit for the allocated pages");
module_param_named(pages_limit, ttm_pages_limit, ulong, 0644);

static unsigned long ttm_dma32_pages_limit;

MODULE_PARM_DESC(dma32_pages_limit, "Limit for the allocated DMA32 pages");
module_param_named(dma32_pages_limit, ttm_dma32_pages_limit, ulong, 0644);
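
/*
 * Note: with 0644 permissions both limits are writable at runtime through
 * /sys/module/ttm/parameters/ (or settable via ttm.pages_limit= and
 * ttm.dma32_pages_limit= on the kernel command line when TTM is built in).
 * A value of zero means the defaults chosen in ttm_tt_mgr_init() are used.
 */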

static atomic_long_t ttm_pages_allocated;
static atomic_long_t ttm_dma32_pages_allocated;

/*
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_device *bdev = bo->bdev;
	struct drm_device *ddev = bo->base.dev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_TT_FLAG_EXTERNAL;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}
	/*
	 * When using dma_alloc_coherent with memory encryption, the mapped
	 * TT pages need to be decrypted; otherwise the drivers would end up
	 * sending encrypted memory to the GPU.
	 */
	if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		page_flags |= TTM_TT_FLAG_DECRYPTED;
		drm_info_once(ddev, "TT memory decryption enabled.");
	}

	bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE &&
		!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL));

	return 0;
}

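/*
 * Illustration only (not part of this file): the &ttm_device_funcs.ttm_tt_create
 * callback invoked above is typically a thin wrapper that allocates a
 * struct ttm_tt and initializes it with ttm_tt_init(). The function name below
 * is hypothetical; real drivers add their own bookkeeping and pick the caching
 * mode that matches the device.
 *
 *	static struct ttm_tt *foo_ttm_tt_create(struct ttm_buffer_object *bo,
 *						uint32_t page_flags)
 *	{
 *		struct ttm_tt *tt;
 *
 *		tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *		if (!tt)
 *			return NULL;
 *
 *		if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0)) {
 *			kfree(tt);
 *			return NULL;
 *		}
 *		return tt;
 *	}
 */
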
/*
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(void*), GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	return 0;
}

static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +
			      sizeof(*ttm->dma_address), GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->dma_address = kvcalloc(ttm->num_pages, sizeof(*ttm->dma_address),
				    GFP_KERNEL);
	if (!ttm->dma_address)
		return -ENOMEM;

	return 0;
}

void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	bdev->funcs->ttm_tt_destroy(bdev, ttm);
}

static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags,
			       enum ttm_caching caching,
			       unsigned long extra_pages)
{
	ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages;
	ttm->page_flags = page_flags;
	ttm->dma_address = NULL;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
	ttm->caching = caching;
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags, enum ttm_caching caching,
		unsigned long extra_pages)
{
	ttm_tt_init_fields(ttm, bo, page_flags, caching, extra_pages);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	WARN_ON(ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);

	if (ttm->swap_storage)
		fput(ttm->swap_storage);
	ttm->swap_storage = NULL;

	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm->dma_address);
	ttm->pages = NULL;
	ttm->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		   uint32_t page_flags, enum ttm_caching caching)
{
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags, caching, 0);

	if (page_flags & TTM_TT_FLAG_EXTERNAL)
		ret = ttm_sg_tt_alloc_page_directory(ttm);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	gfp_t gfp_mask;
	int i, ret;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;
	gfp_mask = mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page_gfp(swap_space, i,
							gfp_mask);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;

	return 0;

out_err:
	return ret;
}

/**
 * ttm_tt_swapout - swap out a TT object
 *
 * @bdev: TTM device structure.
 * @ttm: The struct ttm_tt.
 * @gfp_flags: Flags to use for memory allocation.
 *
 * Swap out a TT object to a shmem file. Returns the number of pages swapped
 * out or a negative error code.
 */
int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
		   gfp_t gfp_flags)
{
	loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i, ret;

	swap_storage = shmem_file_setup("ttm swap", size, 0);
	if (IS_ERR(swap_storage)) {
		pr_err("Failed allocating swap storage\n");
		return PTR_ERR(swap_storage);
	}

	swap_space = swap_storage->f_mapping;
	gfp_flags &= mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_flags);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_TT_FLAG_SWAPPED;

	return ttm->num_pages;

out_err:
	fput(swap_storage);

	return ret;
}
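
/*
 * Illustration only: TTM's generic BO swapout path is the usual caller of the
 * function above; in essence it ends up doing something like
 *
 *	int num_pages = ttm_tt_swapout(bo->bdev, bo->ttm, GFP_KERNEL);
 *
 *	if (num_pages < 0)
 *		return num_pages;
 *
 * after which the backing pages have been released and only the shmem swap
 * storage keeps the content until ttm_tt_swapin() runs again.
 */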

int ttm_tt_populate(struct ttm_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_add(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
	       atomic_long_read(&ttm_dma32_pages_allocated) >
	       ttm_dma32_pages_limit) {

		ret = ttm_global_swapout(ctx, GFP_KERNEL);
		if (ret == 0)
			break;
		if (ret < 0)
			goto error;
	}

	if (bdev->funcs->ttm_tt_populate)
		ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		goto error;

	ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
	if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_tt_unpopulate(bdev, ttm);
			return ret;
		}
	}

	return 0;

error:
	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_tt_populate);
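
/*
 * Illustration only (names hypothetical): a driver that needs the backing
 * pages present before binding the TT to the GPU would typically, with the BO
 * reservation held, do something like
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = true,
 *		.no_wait_gpu = false,
 *	};
 *	int ret;
 *
 *	ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
 *	if (ret)
 *		return ret;
 *
 * The global pages_limit/dma32_pages_limit accounting above then decides
 * whether other TTs get swapped out first.
 */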

void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	if (bdev->funcs->ttm_tt_unpopulate)
		bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_free(&bdev->pool, ttm);

	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
}

#ifdef CONFIG_DEBUG_FS

/* Test the shrinker functions and dump the result */
static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct ttm_operation_ctx ctx = { false, false };

	seq_printf(m, "%d\n", ttm_global_swapout(&ctx, GFP_KERNEL));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);

#endif


/*
 * ttm_tt_mgr_init - initialize the global page limits
 *
 * Set the default limits for allocated (DMA32) pages, which determine when
 * ttm_tt_populate() starts swapping out BOs, and create the debugfs file used
 * to test the swapout path.
 */
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_tt_debugfs_shrink_fops);
#endif

	if (!ttm_pages_limit)
		ttm_pages_limit = num_pages;

	if (!ttm_dma32_pages_limit)
		ttm_dma32_pages_limit = num_dma32_pages;
}

static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
				       struct iosys_map *dmap,
				       pgoff_t i)
{
	struct ttm_kmap_iter_tt *iter_tt =
		container_of(iter, typeof(*iter_tt), base);

	iosys_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
						       iter_tt->prot));
}

static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
					 struct iosys_map *map)
{
	kunmap_local(map->vaddr);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
	.map_local = ttm_kmap_iter_tt_map_local,
	.unmap_local = ttm_kmap_iter_tt_unmap_local,
	.maps_tt = true,
};

/**
 * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
 * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
 * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
		      struct ttm_tt *tt)
{
	iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
	iter_tt->tt = tt;
	if (tt)
		iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
	else
		iter_tt->prot = PAGE_KERNEL;

	return &iter_tt->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
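
/*
 * Illustration only (assumes a populated TT on a hypothetical BO): mapping and
 * unmapping the first page of a BO's TT through the iterator ops defined
 * above.
 *
 *	struct ttm_kmap_iter_tt iter_tt;
 *	struct ttm_kmap_iter *iter;
 *	struct iosys_map map;
 *
 *	iter = ttm_kmap_iter_tt_init(&iter_tt, bo->ttm);
 *	iter->ops->map_local(iter, &map, 0);
 *	... access map.vaddr ...
 *	iter->ops->unmap_local(iter, &map);
 */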

unsigned long ttm_tt_pages_limit(void)
{
	return ttm_pages_limit;
}
EXPORT_SYMBOL(ttm_tt_pages_limit);