xref: /openbmc/linux/drivers/gpu/drm/i915/gt/shmem_utils.c (revision 82df5b73)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>

#include "gem/i915_gem_object.h"
#include "shmem_utils.h"

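/*
 * Create an anonymous shmem file of size PAGE_ALIGN(len) and fill it with a
 * copy of @data. The caller owns the returned file reference (drop it with
 * fput()); an ERR_PTR() is returned on failure.
 */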
struct file *shmem_create_from_data(const char *name, void *data, size_t len)
{
	struct file *file;
	int err;

	file = shmem_file_setup(name, PAGE_ALIGN(len), VM_NORESERVE);
	if (IS_ERR(file))
		return file;

	err = shmem_write(file, 0, data, len);
	if (err) {
		fput(file);
		return ERR_PTR(err);
	}

	return file;
}

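/*
 * Wrap a GEM object in a shmem file. If the object is already shmem-backed,
 * simply take an extra reference on its existing backing file; otherwise pin
 * a CPU mapping of the object and copy its contents into a new shmem file.
 */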
struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
{
	struct file *file;
	void *ptr;

	if (obj->ops == &i915_gem_shmem_ops) {
		file = obj->base.filp;
		atomic_long_inc(&file->f_count);
		return file;
	}

	ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(ptr))
		return ERR_CAST(ptr);

	file = shmem_create_from_data("", ptr, obj->base.size);
	i915_gem_object_unpin_map(obj);

	return file;
}

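/* Number of pages (and hence PTEs) needed to map the whole shmem file. */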
static size_t shmem_npte(struct file *file)
{
	return file->f_mapping->host->i_size >> PAGE_SHIFT;
}

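/*
 * Undo shmem_pin_map() for the first @n_pte pages: remove the kernel mapping
 * and drop two references per page (see the comments below).
 */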
static void __shmem_unpin_map(struct file *file, void *ptr, size_t n_pte)
{
	unsigned long pfn;

	vunmap(ptr);

	for (pfn = 0; pfn < n_pte; pfn++) {
		struct page *page;

		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
						   GFP_KERNEL);
		if (!WARN_ON(IS_ERR(page))) {
			put_page(page); /* drop the lookup reference above */
			put_page(page); /* drop the shmem_pin_map() reference */
		}
	}
}

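/*
 * Pin all pages of the shmem file in memory and map them into a contiguous
 * kernel virtual address range. The mapping is marked unevictable so the
 * pages cannot be reclaimed while pinned. Returns NULL on failure.
 *
 * A minimal usage sketch:
 *
 *	void *vaddr = shmem_pin_map(file);
 *	if (!vaddr)
 *		return -ENOMEM;
 *	... access the pinned contents through vaddr ...
 *	shmem_unpin_map(file, vaddr);
 */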
void *shmem_pin_map(struct file *file)
{
	const size_t n_pte = shmem_npte(file);
	pte_t *stack[32], **ptes, **mem;
	struct vm_struct *area;
	unsigned long pfn;

	/* For larger files, keep the PTE pointer array on the heap. */
	mem = stack;
	if (n_pte > ARRAY_SIZE(stack)) {
		mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
		if (!mem)
			return NULL;
	}

	/* Reserve a virtually contiguous range and collect its PTE slots. */
	area = alloc_vm_area(n_pte << PAGE_SHIFT, mem);
	if (!area) {
		if (mem != stack)
			kvfree(mem);
		return NULL;
	}

	/* Pin each page of the file and point the reserved PTEs at it. */
	ptes = mem;
	for (pfn = 0; pfn < n_pte; pfn++) {
		struct page *page;

		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
						   GFP_KERNEL);
		if (IS_ERR(page))
			goto err_page;

		**ptes++ = mk_pte(page, PAGE_KERNEL);
	}

	if (mem != stack)
		kvfree(mem);

	mapping_set_unevictable(file->f_mapping);
	return area->addr;

err_page:
	if (mem != stack)
		kvfree(mem);

	/* Unmap and release only the pfn pages pinned before the failure. */
	__shmem_unpin_map(file, area->addr, pfn);
	return NULL;
}

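/* Release a mapping obtained from shmem_pin_map() and allow eviction again. */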
void shmem_unpin_map(struct file *file, void *ptr)
{
	mapping_clear_unevictable(file->f_mapping);
	__shmem_unpin_map(file, ptr, shmem_npte(file));
}

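/*
 * Copy @len bytes between @ptr and the shmem file starting at byte @off,
 * page by page through a temporary kmap(); @write selects the direction.
 */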
static int __shmem_rw(struct file *file, loff_t off,
		      void *ptr, size_t len,
		      bool write)
{
	unsigned long pfn;

	for (pfn = off >> PAGE_SHIFT; len; pfn++) {
		unsigned int this =
			min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
		struct page *page;
		void *vaddr;

		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
						   GFP_KERNEL);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap(page);
		if (write)
			memcpy(vaddr + offset_in_page(off), ptr, this);
		else
			memcpy(ptr, vaddr + offset_in_page(off), this);
		kunmap(page);
		put_page(page);

		len -= this;
		ptr += this;
		off = 0;
	}

	return 0;
}

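/* Read @len bytes from the shmem file at offset @off into @dst. */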
int shmem_read(struct file *file, loff_t off, void *dst, size_t len)
{
	return __shmem_rw(file, off, dst, len, false);
}

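/* Write @len bytes from @src into the shmem file at offset @off. */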
int shmem_write(struct file *file, loff_t off, void *src, size_t len)
{
	return __shmem_rw(file, off, src, len, true);
}

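/* Build the selftests into the driver when they are enabled. */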
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "st_shmem_utils.c"
#endif