/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm.h>
#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_region.h"
#include "i915_scatterlist.h"

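/*
 * "Phys" objects swap an object's shmem backing for a single physically
 * contiguous allocation, for callers that must hand the hardware a raw
 * physical address rather than a GGTT offset (e.g. the cursor plane on
 * very old chipsets).
 */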
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	struct scatterlist *sg;
	struct sg_table *st;
	dma_addr_t dma;
	void *vaddr;
	void *dst;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/*
	 * Always aligning to the object size allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev,
				   roundup_pow_of_two(obj->base.size),
				   &dma, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_pci;

	if (sg_alloc_table(st, 1, GFP_KERNEL))
		goto err_st;

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	/*
	 * Stash the CPU address in the sg page pointer: it is not a real
	 * struct page, and put_pages recovers vaddr with sg_page().
	 */
	sg_assign_page(sg, (struct page *)vaddr);
	sg_dma_address(sg) = dma;
	sg_dma_len(sg) = obj->base.size;

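	/* Snapshot the current shmem contents into the contiguous buffer. */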
	dst = vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		void *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			goto err_sg;

		src = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		drm_clflush_virt_range(dst, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		dst += PAGE_SIZE;
	}

	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;

err_sg:
	sg_free_table(st);
err_st:
	kfree(st);
err_pci:
	dma_free_coherent(&obj->base.dev->pdev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
	return -ENOMEM;
}

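/*
 * Release the contiguous backing store, first writing any dirty
 * contents back to the shmem filp so nothing is lost if the object is
 * re-populated later. The CPU address is recovered from the sg page
 * pointer stashed by get_pages; pages that cannot be read back from
 * shmem are skipped.
 */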
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	dma_addr_t dma = sg_dma_address(pages->sgl);
	void *vaddr = sg_page(pages->sgl);

	__i915_gem_object_release_shmem(obj, pages, false);

	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		void *src = vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(src, PAGE_SIZE);
			memcpy(dst, src, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);

			src += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	dma_free_coherent(&obj->base.dev->pdev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
}

static void phys_release(struct drm_i915_gem_object *obj)
{
	fput(obj->base.filp);
}

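/* The object ops installed on a successful i915_gem_object_attach_phys(). */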
static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,

	.release = phys_release,
};

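/*
 * i915_gem_object_attach_phys() - switch @obj to contiguous "phys" backing.
 * @align gives the required physical alignment in bytes and must not
 * exceed the object size; since the backing store is rounded up to the
 * next power of two, any smaller alignment is satisfied naturally.
 *
 * The object is unbound, its current contents are copied into the
 * contiguous allocation, and the new pages are perma-pinned. On failure
 * the original shmem backing is restored and a negative error code is
 * returned.
 */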
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	struct sg_table *pages;
	int err;

	if (align > obj->base.size)
		return -EINVAL;

	if (obj->ops == &i915_gem_phys_ops)
		return 0;

	if (obj->ops != &i915_gem_shmem_ops)
		return -EINVAL;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.quirked) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.mapping) {
		err = -EBUSY;
		goto err_unlock;
	}

	/*
	 * Steal the current shmem pages; they are handed back to the
	 * shmem ops once the phys copy exists, or reinstated on error.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	obj->ops = &i915_gem_phys_ops;

	err = ____i915_gem_object_get_pages(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages))
		i915_gem_shmem_ops.put_pages(obj, pages);

	i915_gem_object_release_memory_region(obj);

	mutex_unlock(&obj->mm.lock);
	return 0;

err_xfer:
	/* Restore the original shmem backing and ops. */
	obj->ops = &i915_gem_shmem_ops;
	if (!IS_ERR_OR_NULL(pages)) {
		unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);

		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
	}
err_unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}
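
/*
 * Usage sketch (illustrative; no caller lives in this file): a display
 * path that must program a physical address would do roughly
 *
 *	err = i915_gem_object_attach_phys(obj, alignment);
 *	if (err)
 *		return err;
 *	addr = sg_dma_address(obj->mm.pages->sgl);
 *
 * and then write the address into the relevant register; the pages stay
 * pinned until the object's final release.
 */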

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif