/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm.h> /* for drm_legacy.h! */
#include <drm/drm_cache.h>
#include <drm/drm_legacy.h> /* for drm_pci.h! */
#include <drm/drm_pci.h>

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

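/*
 * Allocate a single contiguous DMA buffer spanning the whole object, copy
 * the current shmem contents into it, and describe it with a one-entry
 * sg_table so the rest of the GEM code can treat it like any other set of
 * backing pages.
 */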
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	struct drm_dma_handle *phys;
	struct sg_table *st;
	struct scatterlist *sg;
	char *vaddr;
	int i;
	int err;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/* Always aligning to the object size allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	phys = drm_pci_alloc(obj->base.dev,
			     roundup_pow_of_two(obj->base.size),
			     roundup_pow_of_two(obj->base.size));
	if (!phys)
		return -ENOMEM;

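	/*
	 * Copy each shmem backing page into the contiguous buffer,
	 * flushing the CPU caches for each chunk as we go.
	 */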
	vaddr = phys->vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto err_phys;
		}

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st) {
		err = -ENOMEM;
		goto err_phys;
	}

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		err = -ENOMEM;
		goto err_phys;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = phys->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->phys_handle = phys;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;

err_phys:
	drm_pci_free(obj->base.dev, phys);

	return err;
}

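/*
 * Write any dirty contents of the contiguous allocation back to the shmem
 * backing store, then release both the sg_table and the DMA buffer.
 */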
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, false);

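	/*
	 * If the object was written through the contiguous mapping, copy
	 * its contents back into the individual shmem pages and mark them
	 * dirty so the data survives swap-out.
	 */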
	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

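/*
 * Convert a shmem-backed object into one backed by a single contiguous
 * physical allocation, for objects that must be physically contiguous
 * (e.g. hardware cursors on some older platforms).
 */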
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	struct sg_table *pages;
	int err;

	if (align > obj->base.size)
		return -EINVAL;

	if (obj->ops == &i915_gem_phys_ops)
		return 0;

	if (obj->ops != &i915_gem_shmem_ops)
		return -EINVAL;

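	/* Drop all GTT bindings of the shmem pages before swapping the backing store. */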
	err = i915_gem_object_unbind(obj);
	if (err)
		return err;

	mutex_lock(&obj->mm.lock);

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.quirked) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.mapping) {
		err = -EBUSY;
		goto err_unlock;
	}

	pages = __i915_gem_object_unset_pages(obj);

	obj->ops = &i915_gem_phys_ops;

	err = ____i915_gem_object_get_pages(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages))
		i915_gem_shmem_ops.put_pages(obj, pages);
	mutex_unlock(&obj->mm.lock);
	return 0;

err_xfer:
	obj->ops = &i915_gem_shmem_ops;
	if (!IS_ERR_OR_NULL(pages)) {
		unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);

		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
	}
err_unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif