/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

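/*
 * Support for "phys" objects: GEM objects whose backing store is a single
 * physically contiguous, DMA-coherent allocation rather than a set of
 * shmem pages. The object's shmem file is kept alive so that the contents
 * can be copied in when the pages are acquired and written back out when
 * they are released.
 */
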
#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm.h> /* for drm_legacy.h! */
#include <drm/drm_cache.h>
#include <drm/drm_legacy.h> /* for drm_pci.h! */
#include <drm/drm_pci.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_region.h"
#include "i915_scatterlist.h"

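/*
 * Swap in the contents of the shmem backing store as a single contiguous,
 * DMA-coherent allocation. Note that sg_page() of the single scatterlist
 * entry is (ab)used to stash the CPU address of the coherent buffer;
 * put_pages relies on this to write the contents back out.
 */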
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	struct scatterlist *sg;
	struct sg_table *st;
	dma_addr_t dma;
	void *vaddr;
	void *dst;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/*
	 * Always aligning to the object size allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev,
				   roundup_pow_of_two(obj->base.size),
				   &dma, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_pci;

	if (sg_alloc_table(st, 1, GFP_KERNEL))
		goto err_st;

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	/*
	 * There is no real struct page backing the coherent buffer; stash
	 * its CPU address in the scatterlist instead, for put_pages.
	 */
	sg_assign_page(sg, (struct page *)vaddr);
	sg_dma_address(sg) = dma;
	sg_dma_len(sg) = obj->base.size;

	/*
	 * Copy the shmem contents into the contiguous buffer, flushing
	 * each page out of the CPU cache as we go.
	 */
	dst = vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		void *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			goto err_pages;

		src = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		drm_clflush_virt_range(dst, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		dst += PAGE_SIZE;
	}

	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;

err_pages:
	sg_free_table(st);
err_st:
	kfree(st);
err_pci:
	dma_free_coherent(&obj->base.dev->pdev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
	return -ENOMEM;
}

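/*
 * Swap out: if the object was written to while it owned the contiguous
 * buffer, copy the contents back out to the shmem pages before freeing
 * the DMA-coherent allocation.
 */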
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	dma_addr_t dma = sg_dma_address(pages->sgl);
	/* sg_page() holds the CPU address of the coherent buffer, see above */
	void *vaddr = sg_page(pages->sgl);

	__i915_gem_object_release_shmem(obj, pages, false);

	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		void *src = vaddr;
		int i;

		/*
		 * The object was written to while it owned the contiguous
		 * buffer; copy the contents back out to the shmem pages.
		 */
		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(src, PAGE_SIZE);
			memcpy(dst, src, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);

			src += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	dma_free_coherent(&obj->base.dev->pdev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
}

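/*
 * The shmem file was kept alive across the conversion so that the object
 * contents could be swapped in and out; drop our reference on final release.
 */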
static void phys_release(struct drm_i915_gem_object *obj)
{
	fput(obj->base.filp);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,

	.release = phys_release,
};

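/*
 * Convert a shmem-backed object into a phys object, i.e. swap its backing
 * store for a single physically contiguous allocation, for hardware that
 * requires physically contiguous memory. The object must be idle, unpinned
 * and unmapped for the swap to succeed.
 */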
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	struct sg_table *pages;
	int err;

	if (align > obj->base.size)
		return -EINVAL;

	/* Already converted? */
	if (obj->ops == &i915_gem_phys_ops)
		return 0;

	/* Only shmem-backed objects can be converted */
	if (obj->ops != &i915_gem_shmem_ops)
		return -EINVAL;

	/* Discard any GPU bindings before swapping out the backing store */
	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);

	/* Refuse if the object has been marked as purgeable */
	if (obj->mm.madv != I915_MADV_WILLNEED) {
		err = -EFAULT;
		goto err_unlock;
	}

	/* The current pages are pinned by a quirk and cannot be swapped */
	if (obj->mm.quirked) {
		err = -EFAULT;
		goto err_unlock;
	}

	/* An outstanding CPU mapping pins the current pages */
	if (obj->mm.mapping) {
		err = -EBUSY;
		goto err_unlock;
	}

	/* Steal the current set of shmem pages... */
	pages = __i915_gem_object_unset_pages(obj);

	obj->ops = &i915_gem_phys_ops;

	/* ...and copy their contents into the new contiguous allocation */
	err = ____i915_gem_object_get_pages(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	/* Now the contents are copied, drop the original shmem pages */
	if (!IS_ERR_OR_NULL(pages))
		i915_gem_shmem_ops.put_pages(obj, pages);

	i915_gem_object_release_memory_region(obj);

	mutex_unlock(&obj->mm.lock);
	return 0;

err_xfer:
	/* Restore the original shmem backing store */
	obj->ops = &i915_gem_shmem_ops;
	if (!IS_ERR_OR_NULL(pages)) {
		unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);

		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
	}
err_unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif