/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm.h> /* for drm_legacy.h! */
#include <drm/drm_cache.h>
#include <drm/drm_legacy.h> /* for drm_pci.h! */
#include <drm/drm_pci.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_region.h"
#include "i915_scatterlist.h"

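/*
 * Allocate a single physically contiguous DMA buffer covering the whole
 * object, copy the current shmem-backed contents into it, and describe the
 * result with a one-entry sg_table whose DMA address is the buffer's bus
 * address.
 */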
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	struct drm_dma_handle *phys;
	struct sg_table *st;
	struct scatterlist *sg;
	char *vaddr;
	int i;
	int err;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/* Always aligning to the object size allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	phys = drm_pci_alloc(obj->base.dev,
			     roundup_pow_of_two(obj->base.size),
			     roundup_pow_of_two(obj->base.size));
	if (!phys)
		return -ENOMEM;

	vaddr = phys->vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto err_phys;
		}

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st) {
		err = -ENOMEM;
		goto err_phys;
	}

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		err = -ENOMEM;
		goto err_phys;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = phys->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->phys_handle = phys;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;

err_phys:
	drm_pci_free(obj->base.dev, phys);

	return err;
}

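/*
 * Free the physically contiguous backing store. If the object was marked
 * dirty while phys-backed, its contents are first written back into the
 * shmem pages so nothing is lost.
 */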
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, false);

	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	drm_pci_free(obj->base.dev, obj->phys_handle);
}

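/* Drop the reference on the backing shmem file when the object is freed. */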
static void phys_release(struct drm_i915_gem_object *obj)
{
	fput(obj->base.filp);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,

	.release = phys_release,
};

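/*
 * Convert a shmem-backed object into one backed by a single physically
 * contiguous allocation, e.g. for hardware that requires contiguous
 * memory. The old shmem pages are released once the new backing store
 * has been populated; on failure the original pages are restored.
 */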
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	struct sg_table *pages;
	int err;

	if (align > obj->base.size)
		return -EINVAL;

	if (obj->ops == &i915_gem_phys_ops)
		return 0;

	if (obj->ops != &i915_gem_shmem_ops)
		return -EINVAL;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.quirked) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.mapping) {
		err = -EBUSY;
		goto err_unlock;
	}

	pages = __i915_gem_object_unset_pages(obj);

	obj->ops = &i915_gem_phys_ops;

	err = ____i915_gem_object_get_pages(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages)) {
		i915_gem_shmem_ops.put_pages(obj, pages);
		i915_gem_object_release_memory_region(obj);
	}
	mutex_unlock(&obj->mm.lock);
	return 0;

err_xfer:
	obj->ops = &i915_gem_shmem_ops;
	if (!IS_ERR_OR_NULL(pages)) {
		unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);

		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
	}
err_unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif