xref: /openbmc/linux/drivers/gpu/drm/ttm/ttm_tt.c (revision ba4e7d97)
1ba4e7d97SThomas Hellstrom /**************************************************************************
2ba4e7d97SThomas Hellstrom  *
3ba4e7d97SThomas Hellstrom  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4ba4e7d97SThomas Hellstrom  * All Rights Reserved.
5ba4e7d97SThomas Hellstrom  *
6ba4e7d97SThomas Hellstrom  * Permission is hereby granted, free of charge, to any person obtaining a
7ba4e7d97SThomas Hellstrom  * copy of this software and associated documentation files (the
8ba4e7d97SThomas Hellstrom  * "Software"), to deal in the Software without restriction, including
9ba4e7d97SThomas Hellstrom  * without limitation the rights to use, copy, modify, merge, publish,
10ba4e7d97SThomas Hellstrom  * distribute, sub license, and/or sell copies of the Software, and to
11ba4e7d97SThomas Hellstrom  * permit persons to whom the Software is furnished to do so, subject to
12ba4e7d97SThomas Hellstrom  * the following conditions:
13ba4e7d97SThomas Hellstrom  *
14ba4e7d97SThomas Hellstrom  * The above copyright notice and this permission notice (including the
15ba4e7d97SThomas Hellstrom  * next paragraph) shall be included in all copies or substantial portions
16ba4e7d97SThomas Hellstrom  * of the Software.
17ba4e7d97SThomas Hellstrom  *
18ba4e7d97SThomas Hellstrom  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19ba4e7d97SThomas Hellstrom  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20ba4e7d97SThomas Hellstrom  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21ba4e7d97SThomas Hellstrom  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22ba4e7d97SThomas Hellstrom  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23ba4e7d97SThomas Hellstrom  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24ba4e7d97SThomas Hellstrom  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25ba4e7d97SThomas Hellstrom  *
26ba4e7d97SThomas Hellstrom  **************************************************************************/
27ba4e7d97SThomas Hellstrom /*
28ba4e7d97SThomas Hellstrom  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29ba4e7d97SThomas Hellstrom  */
30ba4e7d97SThomas Hellstrom 
31ba4e7d97SThomas Hellstrom #include <linux/version.h>
32ba4e7d97SThomas Hellstrom #include <linux/vmalloc.h>
33ba4e7d97SThomas Hellstrom #include <linux/sched.h>
34ba4e7d97SThomas Hellstrom #include <linux/highmem.h>
35ba4e7d97SThomas Hellstrom #include <linux/pagemap.h>
36ba4e7d97SThomas Hellstrom #include <linux/file.h>
37ba4e7d97SThomas Hellstrom #include <linux/swap.h>
38ba4e7d97SThomas Hellstrom #include "ttm/ttm_module.h"
39ba4e7d97SThomas Hellstrom #include "ttm/ttm_bo_driver.h"
40ba4e7d97SThomas Hellstrom #include "ttm/ttm_placement.h"
41ba4e7d97SThomas Hellstrom 
42ba4e7d97SThomas Hellstrom static int ttm_tt_swapin(struct ttm_tt *ttm);
43ba4e7d97SThomas Hellstrom 
44ba4e7d97SThomas Hellstrom #if defined(CONFIG_X86)
/*
 * Flush a single page out of the CPU data caches using clflush.
 *
 * The page is mapped with kmap_atomic() so highmem pages can be
 * flushed too, and one clflush is issued per cache line as reported
 * by the boot CPU.  Callers must bracket a batch of these calls with
 * memory barriers (see ttm_tt_cache_flush_clflush()).
 */
static void ttm_tt_clflush_page(struct page *page)
{
	uint8_t *page_virtual;
	unsigned int i;

	/* Tolerate sparse page arrays. */
	if (unlikely(page == NULL))
		return;

	page_virtual = kmap_atomic(page, KM_USER0);

	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
		clflush(page_virtual + i);

	kunmap_atomic(page_virtual, KM_USER0);
}
60ba4e7d97SThomas Hellstrom 
/*
 * Flush an array of pages from the CPU caches with clflush, fencing
 * the whole batch with memory barriers so the flushes are ordered
 * with respect to surrounding accesses.
 */
static void ttm_tt_cache_flush_clflush(struct page *pages[],
				       unsigned long num_pages)
{
	unsigned long page_idx;

	mb();
	for (page_idx = 0; page_idx < num_pages; ++page_idx)
		ttm_tt_clflush_page(pages[page_idx]);
	mb();
}
71ba4e7d97SThomas Hellstrom #else
/*
 * Empty IPI handler: the cross-CPU interrupt itself provides the
 * required serialization on non-x86; nothing to do on arrival.
 */
static void ttm_tt_ipi_handler(void *null)
{
}
76ba4e7d97SThomas Hellstrom #endif
77ba4e7d97SThomas Hellstrom 
/*
 * ttm_tt_cache_flush - make sure the given pages are not dirty in any
 * CPU cache.
 *
 * On x86 with clflush support the pages are flushed line by line.
 * Otherwise an empty IPI is broadcast to every CPU as a fallback
 * serialization step (NOTE(review): whether the IPI alone writes back
 * caches is architecture dependent — this mirrors the drm cache-flush
 * fallback; confirm per target architecture).
 */
void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86)
	if (cpu_has_clflush) {
		ttm_tt_cache_flush_clflush(pages, num_pages);
		return;
	}
#else
	if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
		printk(KERN_ERR TTM_PFX
		       "Timed out waiting for drm cache flush.\n");
#endif
}
92ba4e7d97SThomas Hellstrom 
93ba4e7d97SThomas Hellstrom /**
94ba4e7d97SThomas Hellstrom  * Allocates storage for pointers to the pages that back the ttm.
95ba4e7d97SThomas Hellstrom  *
96ba4e7d97SThomas Hellstrom  * Uses kmalloc if possible. Otherwise falls back to vmalloc.
97ba4e7d97SThomas Hellstrom  */
98ba4e7d97SThomas Hellstrom static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
99ba4e7d97SThomas Hellstrom {
100ba4e7d97SThomas Hellstrom 	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
101ba4e7d97SThomas Hellstrom 	ttm->pages = NULL;
102ba4e7d97SThomas Hellstrom 
103ba4e7d97SThomas Hellstrom 	if (size <= PAGE_SIZE)
104ba4e7d97SThomas Hellstrom 		ttm->pages = kzalloc(size, GFP_KERNEL);
105ba4e7d97SThomas Hellstrom 
106ba4e7d97SThomas Hellstrom 	if (!ttm->pages) {
107ba4e7d97SThomas Hellstrom 		ttm->pages = vmalloc_user(size);
108ba4e7d97SThomas Hellstrom 		if (ttm->pages)
109ba4e7d97SThomas Hellstrom 			ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
110ba4e7d97SThomas Hellstrom 	}
111ba4e7d97SThomas Hellstrom }
112ba4e7d97SThomas Hellstrom 
113ba4e7d97SThomas Hellstrom static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
114ba4e7d97SThomas Hellstrom {
115ba4e7d97SThomas Hellstrom 	if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
116ba4e7d97SThomas Hellstrom 		vfree(ttm->pages);
117ba4e7d97SThomas Hellstrom 		ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
118ba4e7d97SThomas Hellstrom 	} else {
119ba4e7d97SThomas Hellstrom 		kfree(ttm->pages);
120ba4e7d97SThomas Hellstrom 	}
121ba4e7d97SThomas Hellstrom 	ttm->pages = NULL;
122ba4e7d97SThomas Hellstrom }
123ba4e7d97SThomas Hellstrom 
124ba4e7d97SThomas Hellstrom static struct page *ttm_tt_alloc_page(unsigned page_flags)
125ba4e7d97SThomas Hellstrom {
126ba4e7d97SThomas Hellstrom 	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
127ba4e7d97SThomas Hellstrom 		return alloc_page(GFP_HIGHUSER | __GFP_ZERO);
128ba4e7d97SThomas Hellstrom 
129ba4e7d97SThomas Hellstrom 	return alloc_page(GFP_HIGHUSER);
130ba4e7d97SThomas Hellstrom }
131ba4e7d97SThomas Hellstrom 
/*
 * Unpin the user-space pages backing this ttm (TTM_PAGE_FLAG_USER).
 *
 * Clears the backend so it drops its references, dirties each page on
 * behalf of the GPU when the mapping was writable and the ttm has been
 * bound since pinning, releases the references taken by
 * get_user_pages(), returns the accounted memory, and resets the ttm
 * to the tt_unpopulated state.
 */
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;
	struct ttm_backend *be = ttm->be;

	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

	/* Make sure the backend no longer references the pages. */
	if (be)
		be->func->clear(be);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		/*
		 * The shared dummy read page is only ever substituted for
		 * read-only mappings and was not individually pinned.
		 */
		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		/* Propagate GPU writes to the VM before unpinning. */
		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		/* User pages were accounted as lowmem; see ttm_tt_set_user(). */
		ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}
168ba4e7d97SThomas Hellstrom 
/*
 * Return the backing page at @index, allocating it on demand.
 *
 * Newly allocated pages are charged to the global memory accounting
 * and inserted into the page directory: highmem pages fill in from the
 * top (first_himem_page counts down) and lowmem pages from the bottom
 * (last_lomem_page counts up).  NOTE(review): the loop re-tests
 * ttm->pages[index] after insertion, so this relies on pages being
 * requested in an order compatible with the two cursors — confirm
 * against callers (ttm_tt_populate() fills front to back).
 *
 * Returns the page, or NULL on allocation/accounting failure.
 */
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct ttm_bo_device *bdev = ttm->bdev;
	struct ttm_mem_global *mem_glob = bdev->mem_glob;
	int ret;

	while (NULL == (p = ttm->pages[index])) {
		p = ttm_tt_alloc_page(ttm->page_flags);

		if (!p)
			return NULL;

		if (PageHighMem(p)) {
			ret =
			    ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
						 false, false, true);
			if (unlikely(ret != 0))
				goto out_err;
			ttm->pages[--ttm->first_himem_page] = p;
		} else {
			ret =
			    ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
						 false, false, false);
			if (unlikely(ret != 0))
				goto out_err;
			ttm->pages[++ttm->last_lomem_page] = p;
		}
	}
	return p;
out_err:
	/* Drop the page we just allocated but failed to account. */
	put_page(p);
	return NULL;
}
203ba4e7d97SThomas Hellstrom 
204ba4e7d97SThomas Hellstrom struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
205ba4e7d97SThomas Hellstrom {
206ba4e7d97SThomas Hellstrom 	int ret;
207ba4e7d97SThomas Hellstrom 
208ba4e7d97SThomas Hellstrom 	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
209ba4e7d97SThomas Hellstrom 		ret = ttm_tt_swapin(ttm);
210ba4e7d97SThomas Hellstrom 		if (unlikely(ret != 0))
211ba4e7d97SThomas Hellstrom 			return NULL;
212ba4e7d97SThomas Hellstrom 	}
213ba4e7d97SThomas Hellstrom 	return __ttm_tt_get_page(ttm, index);
214ba4e7d97SThomas Hellstrom }
215ba4e7d97SThomas Hellstrom 
/*
 * Make sure every page of the ttm is allocated and hand the complete
 * page array to the backend.
 *
 * A no-op when the ttm is already populated; swapped-out ttms are
 * brought back in first.  On success the state advances to tt_unbound.
 *
 * Returns 0 or a negative error code (-ENOMEM on page allocation
 * failure; pages allocated so far are kept and released at destroy
 * time).
 */
int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	struct ttm_backend *be;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	be = ttm->be;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}

	/*
	 * NOTE(review): be is assumed non-NULL here; ttm_tt_create()
	 * fails when no backend can be created.
	 */
	be->func->populate(be, ttm->num_pages, ttm->pages,
			   ttm->dummy_read_page);
	ttm->state = tt_unbound;
	return 0;
}
245ba4e7d97SThomas Hellstrom 
246ba4e7d97SThomas Hellstrom #ifdef CONFIG_X86
247ba4e7d97SThomas Hellstrom static inline int ttm_tt_set_page_caching(struct page *p,
248ba4e7d97SThomas Hellstrom 					  enum ttm_caching_state c_state)
249ba4e7d97SThomas Hellstrom {
250ba4e7d97SThomas Hellstrom 	if (PageHighMem(p))
251ba4e7d97SThomas Hellstrom 		return 0;
252ba4e7d97SThomas Hellstrom 
253ba4e7d97SThomas Hellstrom 	switch (c_state) {
254ba4e7d97SThomas Hellstrom 	case tt_cached:
255ba4e7d97SThomas Hellstrom 		return set_pages_wb(p, 1);
256ba4e7d97SThomas Hellstrom 	case tt_wc:
257ba4e7d97SThomas Hellstrom 	    return set_memory_wc((unsigned long) page_address(p), 1);
258ba4e7d97SThomas Hellstrom 	default:
259ba4e7d97SThomas Hellstrom 		return set_pages_uc(p, 1);
260ba4e7d97SThomas Hellstrom 	}
261ba4e7d97SThomas Hellstrom }
262ba4e7d97SThomas Hellstrom #else /* CONFIG_X86 */
/*
 * Non-x86 stub: caching-attribute changes of the linear kernel map are
 * only implemented for x86; other architectures report success.
 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	return 0;
}
268ba4e7d97SThomas Hellstrom #endif /* CONFIG_X86 */
269ba4e7d97SThomas Hellstrom 
/*
 * Change caching policy for the linear kernel map for all pages of a
 * ttm.
 *
 * Transitioning away from tt_cached forces population first so every
 * page exists, and leaving the cacheable state flushes the CPU caches
 * before the attribute change.  On a mid-loop failure the pages
 * already converted are rolled back to the previous caching state.
 *
 * Returns 0 or the first error from ttm_tt_set_page_caching().
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (c_state != tt_cached) {
		ret = ttm_tt_populate(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	/* Write back dirty lines before making pages non-cacheable. */
	if (ttm->caching_state == tt_cached)
		ttm_tt_cache_flush(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page, c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	/* Best-effort rollback of pages converted before the failure. */
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state);
		}
	}

	return ret;
}
318ba4e7d97SThomas Hellstrom 
319ba4e7d97SThomas Hellstrom int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
320ba4e7d97SThomas Hellstrom {
321ba4e7d97SThomas Hellstrom 	enum ttm_caching_state state;
322ba4e7d97SThomas Hellstrom 
323ba4e7d97SThomas Hellstrom 	if (placement & TTM_PL_FLAG_WC)
324ba4e7d97SThomas Hellstrom 		state = tt_wc;
325ba4e7d97SThomas Hellstrom 	else if (placement & TTM_PL_FLAG_UNCACHED)
326ba4e7d97SThomas Hellstrom 		state = tt_uncached;
327ba4e7d97SThomas Hellstrom 	else
328ba4e7d97SThomas Hellstrom 		state = tt_cached;
329ba4e7d97SThomas Hellstrom 
330ba4e7d97SThomas Hellstrom 	return ttm_tt_set_caching(ttm, state);
331ba4e7d97SThomas Hellstrom }
332ba4e7d97SThomas Hellstrom 
/*
 * Free all kernel-allocated backing pages of a ttm.
 *
 * The backend is cleared first so it drops any references, the pages
 * are returned to the cached state (the attribute change must precede
 * freeing them back to the allocator), then each page and its
 * accounting is released.  An unexpected extra reference on a page is
 * reported before the page is freed anyway.
 */
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	int i;
	struct page *cur_page;
	struct ttm_backend *be = ttm->be;

	if (be)
		be->func->clear(be);
	(void)ttm_tt_set_caching(ttm, tt_cached);
	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		ttm->pages[i] = NULL;
		if (cur_page) {
			if (page_count(cur_page) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
					    PageHighMem(cur_page));
			__free_page(cur_page);
		}
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}
359ba4e7d97SThomas Hellstrom 
360ba4e7d97SThomas Hellstrom void ttm_tt_destroy(struct ttm_tt *ttm)
361ba4e7d97SThomas Hellstrom {
362ba4e7d97SThomas Hellstrom 	struct ttm_backend *be;
363ba4e7d97SThomas Hellstrom 
364ba4e7d97SThomas Hellstrom 	if (unlikely(ttm == NULL))
365ba4e7d97SThomas Hellstrom 		return;
366ba4e7d97SThomas Hellstrom 
367ba4e7d97SThomas Hellstrom 	be = ttm->be;
368ba4e7d97SThomas Hellstrom 	if (likely(be != NULL)) {
369ba4e7d97SThomas Hellstrom 		be->func->destroy(be);
370ba4e7d97SThomas Hellstrom 		ttm->be = NULL;
371ba4e7d97SThomas Hellstrom 	}
372ba4e7d97SThomas Hellstrom 
373ba4e7d97SThomas Hellstrom 	if (likely(ttm->pages != NULL)) {
374ba4e7d97SThomas Hellstrom 		if (ttm->page_flags & TTM_PAGE_FLAG_USER)
375ba4e7d97SThomas Hellstrom 			ttm_tt_free_user_pages(ttm);
376ba4e7d97SThomas Hellstrom 		else
377ba4e7d97SThomas Hellstrom 			ttm_tt_free_alloced_pages(ttm);
378ba4e7d97SThomas Hellstrom 
379ba4e7d97SThomas Hellstrom 		ttm_tt_free_page_directory(ttm);
380ba4e7d97SThomas Hellstrom 	}
381ba4e7d97SThomas Hellstrom 
382ba4e7d97SThomas Hellstrom 	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
383ba4e7d97SThomas Hellstrom 	    ttm->swap_storage)
384ba4e7d97SThomas Hellstrom 		fput(ttm->swap_storage);
385ba4e7d97SThomas Hellstrom 
386ba4e7d97SThomas Hellstrom 	kfree(ttm);
387ba4e7d97SThomas Hellstrom }
388ba4e7d97SThomas Hellstrom 
/*
 * Pin @num_pages user pages starting at @start in @tsk's address space
 * as the backing of this ttm (TTM_PAGE_FLAG_USER must be set).
 *
 * The pages are accounted as lowmem for now, then pinned with
 * get_user_pages().  For writable mappings a partial pin is an error
 * and everything is rolled back.  NOTE(review): a partial pin of a
 * read-only mapping is accepted here, presumably because missing pages
 * are later substituted with the dummy read page — confirm against the
 * backend populate path.
 *
 * Returns 0 on success, -ENOMEM, or a memory-accounting error code.
 */
int ttm_tt_set_user(struct ttm_tt *ttm,
		    struct task_struct *tsk,
		    unsigned long start, unsigned long num_pages)
{
	struct mm_struct *mm = tsk->mm;
	int ret;
	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
	struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;

	BUG_ON(num_pages != ttm->num_pages);
	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

	/**
	 * Account user pages as lowmem pages for now.
	 */

	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
				   false, false, false);
	if (unlikely(ret != 0))
		return ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret != num_pages && write) {
		ttm_tt_free_user_pages(ttm);
		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
		return -ENOMEM;
	}

	ttm->tsk = tsk;
	ttm->start = start;
	ttm->state = tt_unbound;

	return 0;
}
427ba4e7d97SThomas Hellstrom 
428ba4e7d97SThomas Hellstrom struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
429ba4e7d97SThomas Hellstrom 			     uint32_t page_flags, struct page *dummy_read_page)
430ba4e7d97SThomas Hellstrom {
431ba4e7d97SThomas Hellstrom 	struct ttm_bo_driver *bo_driver = bdev->driver;
432ba4e7d97SThomas Hellstrom 	struct ttm_tt *ttm;
433ba4e7d97SThomas Hellstrom 
434ba4e7d97SThomas Hellstrom 	if (!bo_driver)
435ba4e7d97SThomas Hellstrom 		return NULL;
436ba4e7d97SThomas Hellstrom 
437ba4e7d97SThomas Hellstrom 	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
438ba4e7d97SThomas Hellstrom 	if (!ttm)
439ba4e7d97SThomas Hellstrom 		return NULL;
440ba4e7d97SThomas Hellstrom 
441ba4e7d97SThomas Hellstrom 	ttm->bdev = bdev;
442ba4e7d97SThomas Hellstrom 
443ba4e7d97SThomas Hellstrom 	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
444ba4e7d97SThomas Hellstrom 	ttm->first_himem_page = ttm->num_pages;
445ba4e7d97SThomas Hellstrom 	ttm->last_lomem_page = -1;
446ba4e7d97SThomas Hellstrom 	ttm->caching_state = tt_cached;
447ba4e7d97SThomas Hellstrom 	ttm->page_flags = page_flags;
448ba4e7d97SThomas Hellstrom 
449ba4e7d97SThomas Hellstrom 	ttm->dummy_read_page = dummy_read_page;
450ba4e7d97SThomas Hellstrom 
451ba4e7d97SThomas Hellstrom 	ttm_tt_alloc_page_directory(ttm);
452ba4e7d97SThomas Hellstrom 	if (!ttm->pages) {
453ba4e7d97SThomas Hellstrom 		ttm_tt_destroy(ttm);
454ba4e7d97SThomas Hellstrom 		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
455ba4e7d97SThomas Hellstrom 		return NULL;
456ba4e7d97SThomas Hellstrom 	}
457ba4e7d97SThomas Hellstrom 	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
458ba4e7d97SThomas Hellstrom 	if (!ttm->be) {
459ba4e7d97SThomas Hellstrom 		ttm_tt_destroy(ttm);
460ba4e7d97SThomas Hellstrom 		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
461ba4e7d97SThomas Hellstrom 		return NULL;
462ba4e7d97SThomas Hellstrom 	}
463ba4e7d97SThomas Hellstrom 	ttm->state = tt_unpopulated;
464ba4e7d97SThomas Hellstrom 	return ttm;
465ba4e7d97SThomas Hellstrom }
466ba4e7d97SThomas Hellstrom 
467ba4e7d97SThomas Hellstrom void ttm_tt_unbind(struct ttm_tt *ttm)
468ba4e7d97SThomas Hellstrom {
469ba4e7d97SThomas Hellstrom 	int ret;
470ba4e7d97SThomas Hellstrom 	struct ttm_backend *be = ttm->be;
471ba4e7d97SThomas Hellstrom 
472ba4e7d97SThomas Hellstrom 	if (ttm->state == tt_bound) {
473ba4e7d97SThomas Hellstrom 		ret = be->func->unbind(be);
474ba4e7d97SThomas Hellstrom 		BUG_ON(ret);
475ba4e7d97SThomas Hellstrom 		ttm->state = tt_unbound;
476ba4e7d97SThomas Hellstrom 	}
477ba4e7d97SThomas Hellstrom }
478ba4e7d97SThomas Hellstrom 
479ba4e7d97SThomas Hellstrom int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
480ba4e7d97SThomas Hellstrom {
481ba4e7d97SThomas Hellstrom 	int ret = 0;
482ba4e7d97SThomas Hellstrom 	struct ttm_backend *be;
483ba4e7d97SThomas Hellstrom 
484ba4e7d97SThomas Hellstrom 	if (!ttm)
485ba4e7d97SThomas Hellstrom 		return -EINVAL;
486ba4e7d97SThomas Hellstrom 
487ba4e7d97SThomas Hellstrom 	if (ttm->state == tt_bound)
488ba4e7d97SThomas Hellstrom 		return 0;
489ba4e7d97SThomas Hellstrom 
490ba4e7d97SThomas Hellstrom 	be = ttm->be;
491ba4e7d97SThomas Hellstrom 
492ba4e7d97SThomas Hellstrom 	ret = ttm_tt_populate(ttm);
493ba4e7d97SThomas Hellstrom 	if (ret)
494ba4e7d97SThomas Hellstrom 		return ret;
495ba4e7d97SThomas Hellstrom 
496ba4e7d97SThomas Hellstrom 	ret = be->func->bind(be, bo_mem);
497ba4e7d97SThomas Hellstrom 	if (ret) {
498ba4e7d97SThomas Hellstrom 		printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
499ba4e7d97SThomas Hellstrom 		return ret;
500ba4e7d97SThomas Hellstrom 	}
501ba4e7d97SThomas Hellstrom 
502ba4e7d97SThomas Hellstrom 	ttm->state = tt_bound;
503ba4e7d97SThomas Hellstrom 
504ba4e7d97SThomas Hellstrom 	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
505ba4e7d97SThomas Hellstrom 		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
506ba4e7d97SThomas Hellstrom 	return 0;
507ba4e7d97SThomas Hellstrom }
508ba4e7d97SThomas Hellstrom EXPORT_SYMBOL(ttm_tt_bind);
509ba4e7d97SThomas Hellstrom 
510ba4e7d97SThomas Hellstrom static int ttm_tt_swapin(struct ttm_tt *ttm)
511ba4e7d97SThomas Hellstrom {
512ba4e7d97SThomas Hellstrom 	struct address_space *swap_space;
513ba4e7d97SThomas Hellstrom 	struct file *swap_storage;
514ba4e7d97SThomas Hellstrom 	struct page *from_page;
515ba4e7d97SThomas Hellstrom 	struct page *to_page;
516ba4e7d97SThomas Hellstrom 	void *from_virtual;
517ba4e7d97SThomas Hellstrom 	void *to_virtual;
518ba4e7d97SThomas Hellstrom 	int i;
519ba4e7d97SThomas Hellstrom 	int ret;
520ba4e7d97SThomas Hellstrom 
521ba4e7d97SThomas Hellstrom 	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
522ba4e7d97SThomas Hellstrom 		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
523ba4e7d97SThomas Hellstrom 				      ttm->num_pages);
524ba4e7d97SThomas Hellstrom 		if (unlikely(ret != 0))
525ba4e7d97SThomas Hellstrom 			return ret;
526ba4e7d97SThomas Hellstrom 
527ba4e7d97SThomas Hellstrom 		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
528ba4e7d97SThomas Hellstrom 		return 0;
529ba4e7d97SThomas Hellstrom 	}
530ba4e7d97SThomas Hellstrom 
531ba4e7d97SThomas Hellstrom 	swap_storage = ttm->swap_storage;
532ba4e7d97SThomas Hellstrom 	BUG_ON(swap_storage == NULL);
533ba4e7d97SThomas Hellstrom 
534ba4e7d97SThomas Hellstrom 	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
535ba4e7d97SThomas Hellstrom 
536ba4e7d97SThomas Hellstrom 	for (i = 0; i < ttm->num_pages; ++i) {
537ba4e7d97SThomas Hellstrom 		from_page = read_mapping_page(swap_space, i, NULL);
538ba4e7d97SThomas Hellstrom 		if (IS_ERR(from_page))
539ba4e7d97SThomas Hellstrom 			goto out_err;
540ba4e7d97SThomas Hellstrom 		to_page = __ttm_tt_get_page(ttm, i);
541ba4e7d97SThomas Hellstrom 		if (unlikely(to_page == NULL))
542ba4e7d97SThomas Hellstrom 			goto out_err;
543ba4e7d97SThomas Hellstrom 
544ba4e7d97SThomas Hellstrom 		preempt_disable();
545ba4e7d97SThomas Hellstrom 		from_virtual = kmap_atomic(from_page, KM_USER0);
546ba4e7d97SThomas Hellstrom 		to_virtual = kmap_atomic(to_page, KM_USER1);
547ba4e7d97SThomas Hellstrom 		memcpy(to_virtual, from_virtual, PAGE_SIZE);
548ba4e7d97SThomas Hellstrom 		kunmap_atomic(to_virtual, KM_USER1);
549ba4e7d97SThomas Hellstrom 		kunmap_atomic(from_virtual, KM_USER0);
550ba4e7d97SThomas Hellstrom 		preempt_enable();
551ba4e7d97SThomas Hellstrom 		page_cache_release(from_page);
552ba4e7d97SThomas Hellstrom 	}
553ba4e7d97SThomas Hellstrom 
554ba4e7d97SThomas Hellstrom 	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
555ba4e7d97SThomas Hellstrom 		fput(swap_storage);
556ba4e7d97SThomas Hellstrom 	ttm->swap_storage = NULL;
557ba4e7d97SThomas Hellstrom 	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
558ba4e7d97SThomas Hellstrom 
559ba4e7d97SThomas Hellstrom 	return 0;
560ba4e7d97SThomas Hellstrom out_err:
561ba4e7d97SThomas Hellstrom 	ttm_tt_free_alloced_pages(ttm);
562ba4e7d97SThomas Hellstrom 	return -ENOMEM;
563ba4e7d97SThomas Hellstrom }
564ba4e7d97SThomas Hellstrom 
565ba4e7d97SThomas Hellstrom int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
566ba4e7d97SThomas Hellstrom {
567ba4e7d97SThomas Hellstrom 	struct address_space *swap_space;
568ba4e7d97SThomas Hellstrom 	struct file *swap_storage;
569ba4e7d97SThomas Hellstrom 	struct page *from_page;
570ba4e7d97SThomas Hellstrom 	struct page *to_page;
571ba4e7d97SThomas Hellstrom 	void *from_virtual;
572ba4e7d97SThomas Hellstrom 	void *to_virtual;
573ba4e7d97SThomas Hellstrom 	int i;
574ba4e7d97SThomas Hellstrom 
575ba4e7d97SThomas Hellstrom 	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
576ba4e7d97SThomas Hellstrom 	BUG_ON(ttm->caching_state != tt_cached);
577ba4e7d97SThomas Hellstrom 
578ba4e7d97SThomas Hellstrom 	/*
579ba4e7d97SThomas Hellstrom 	 * For user buffers, just unpin the pages, as there should be
580ba4e7d97SThomas Hellstrom 	 * vma references.
581ba4e7d97SThomas Hellstrom 	 */
582ba4e7d97SThomas Hellstrom 
583ba4e7d97SThomas Hellstrom 	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
584ba4e7d97SThomas Hellstrom 		ttm_tt_free_user_pages(ttm);
585ba4e7d97SThomas Hellstrom 		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
586ba4e7d97SThomas Hellstrom 		ttm->swap_storage = NULL;
587ba4e7d97SThomas Hellstrom 		return 0;
588ba4e7d97SThomas Hellstrom 	}
589ba4e7d97SThomas Hellstrom 
590ba4e7d97SThomas Hellstrom 	if (!persistant_swap_storage) {
591ba4e7d97SThomas Hellstrom 		swap_storage = shmem_file_setup("ttm swap",
592ba4e7d97SThomas Hellstrom 						ttm->num_pages << PAGE_SHIFT,
593ba4e7d97SThomas Hellstrom 						0);
594ba4e7d97SThomas Hellstrom 		if (unlikely(IS_ERR(swap_storage))) {
595ba4e7d97SThomas Hellstrom 			printk(KERN_ERR "Failed allocating swap storage.\n");
596ba4e7d97SThomas Hellstrom 			return -ENOMEM;
597ba4e7d97SThomas Hellstrom 		}
598ba4e7d97SThomas Hellstrom 	} else
599ba4e7d97SThomas Hellstrom 		swap_storage = persistant_swap_storage;
600ba4e7d97SThomas Hellstrom 
601ba4e7d97SThomas Hellstrom 	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
602ba4e7d97SThomas Hellstrom 
603ba4e7d97SThomas Hellstrom 	for (i = 0; i < ttm->num_pages; ++i) {
604ba4e7d97SThomas Hellstrom 		from_page = ttm->pages[i];
605ba4e7d97SThomas Hellstrom 		if (unlikely(from_page == NULL))
606ba4e7d97SThomas Hellstrom 			continue;
607ba4e7d97SThomas Hellstrom 		to_page = read_mapping_page(swap_space, i, NULL);
608ba4e7d97SThomas Hellstrom 		if (unlikely(to_page == NULL))
609ba4e7d97SThomas Hellstrom 			goto out_err;
610ba4e7d97SThomas Hellstrom 
611ba4e7d97SThomas Hellstrom 		preempt_disable();
612ba4e7d97SThomas Hellstrom 		from_virtual = kmap_atomic(from_page, KM_USER0);
613ba4e7d97SThomas Hellstrom 		to_virtual = kmap_atomic(to_page, KM_USER1);
614ba4e7d97SThomas Hellstrom 		memcpy(to_virtual, from_virtual, PAGE_SIZE);
615ba4e7d97SThomas Hellstrom 		kunmap_atomic(to_virtual, KM_USER1);
616ba4e7d97SThomas Hellstrom 		kunmap_atomic(from_virtual, KM_USER0);
617ba4e7d97SThomas Hellstrom 		preempt_enable();
618ba4e7d97SThomas Hellstrom 		set_page_dirty(to_page);
619ba4e7d97SThomas Hellstrom 		mark_page_accessed(to_page);
620ba4e7d97SThomas Hellstrom 		page_cache_release(to_page);
621ba4e7d97SThomas Hellstrom 	}
622ba4e7d97SThomas Hellstrom 
623ba4e7d97SThomas Hellstrom 	ttm_tt_free_alloced_pages(ttm);
624ba4e7d97SThomas Hellstrom 	ttm->swap_storage = swap_storage;
625ba4e7d97SThomas Hellstrom 	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
626ba4e7d97SThomas Hellstrom 	if (persistant_swap_storage)
627ba4e7d97SThomas Hellstrom 		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;
628ba4e7d97SThomas Hellstrom 
629ba4e7d97SThomas Hellstrom 	return 0;
630ba4e7d97SThomas Hellstrom out_err:
631ba4e7d97SThomas Hellstrom 	if (!persistant_swap_storage)
632ba4e7d97SThomas Hellstrom 		fput(swap_storage);
633ba4e7d97SThomas Hellstrom 
634ba4e7d97SThomas Hellstrom 	return -ENOMEM;
635ba4e7d97SThomas Hellstrom }
636