xref: /openbmc/linux/tools/testing/nvdimm/test/iomap.c (revision de2bdb3d)
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
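
/*
 * Interposition layer for the nvdimm test infrastructure: the __wrap_*
 * symbols below shadow their kernel counterparts (substituted at link
 * time, typically via the linker's --wrap option) so that I/O remapping,
 * memremap, resource request/release, and ACPI _FIT evaluation can be
 * redirected to emulated nfit_test resources backed by plain test
 * buffers. Addresses that do not belong to a registered test resource
 * fall through to the real kernel implementations.
 */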
#include <linux/memremap.h>
#include <linux/rculist.h>
#include <linux/export.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/mm.h>
#include "nfit_test.h"

static LIST_HEAD(iomap_head);

static struct iomap_ops {
	nfit_test_lookup_fn nfit_test_lookup;
	struct list_head list;
} iomap_ops = {
	.list = LIST_HEAD_INIT(iomap_ops.list),
};

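/*
 * Register the nfit_test lookup callback so the wrappers below can resolve
 * addresses to emulated resources; teardown unpublishes it and waits for
 * in-flight RCU readers before the callback may go away.
 */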
void nfit_test_setup(nfit_test_lookup_fn lookup)
{
	iomap_ops.nfit_test_lookup = lookup;
	list_add_rcu(&iomap_ops.list, &iomap_head);
}
EXPORT_SYMBOL(nfit_test_setup);

void nfit_test_teardown(void)
{
	list_del_rcu(&iomap_ops.list);
	synchronize_rcu();
}
EXPORT_SYMBOL(nfit_test_teardown);

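/*
 * Look up the emulated resource, if any, that covers @resource. Returns
 * NULL when no lookup callback is registered or the address is not part of
 * a test resource, in which case callers fall back to the real kernel
 * APIs. __get_nfit_res() must be called under rcu_read_lock().
 */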
static struct nfit_test_resource *__get_nfit_res(resource_size_t resource)
{
	struct iomap_ops *ops;

	ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
	if (ops)
		return ops->nfit_test_lookup(resource);
	return NULL;
}

struct nfit_test_resource *get_nfit_res(resource_size_t resource)
{
	struct nfit_test_resource *res;

	rcu_read_lock();
	res = __get_nfit_res(resource);
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(get_nfit_res);

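/*
 * Common ioremap-style helper: addresses that land in an emulated resource
 * are translated to an offset into its backing buffer; everything else is
 * handed to the real mapping function supplied by the caller.
 */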
void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
		void __iomem *(*fallback_fn)(resource_size_t, unsigned long))
{
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);

	if (nfit_res)
		return (void __iomem *) nfit_res->buf + offset
			- nfit_res->res.start;
	return fallback_fn(offset, size);
}

void __iomem *__wrap_devm_ioremap_nocache(struct device *dev,
		resource_size_t offset, unsigned long size)
{
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);

	if (nfit_res)
		return (void __iomem *) nfit_res->buf + offset
			- nfit_res->res.start;
	return devm_ioremap_nocache(dev, offset, size);
}
EXPORT_SYMBOL(__wrap_devm_ioremap_nocache);

void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);

	if (nfit_res)
		return nfit_res->buf + offset - nfit_res->res.start;
	return devm_memremap(dev, offset, size, flags);
}
EXPORT_SYMBOL(__wrap_devm_memremap);

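/*
 * For emulated resources there is no ZONE_DEVICE mapping to set up, so the
 * mock simply returns a pointer into the backing buffer instead of calling
 * the real devm_memremap_pages().
 */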
void *__wrap_devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap)
{
	resource_size_t offset = res->start;
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);

	if (nfit_res)
		return nfit_res->buf + offset - nfit_res->res.start;
	return devm_memremap_pages(dev, res, ref, altmap);
}
EXPORT_SYMBOL(__wrap_devm_memremap_pages);

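/*
 * Since the mock above skips devm_memremap_pages(), test addresses should
 * not advertise PFN_MAP; clear the flag before building the pfn_t.
 */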
pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
{
	struct nfit_test_resource *nfit_res = get_nfit_res(addr);

	if (nfit_res)
		flags &= ~PFN_MAP;
	return phys_to_pfn_t(addr, flags);
}
EXPORT_SYMBOL(__wrap_phys_to_pfn_t);

void *__wrap_memremap(resource_size_t offset, size_t size,
		unsigned long flags)
{
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);

	if (nfit_res)
		return nfit_res->buf + offset - nfit_res->res.start;
	return memremap(offset, size, flags);
}
EXPORT_SYMBOL(__wrap_memremap);

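/*
 * Unmap wrappers: mappings of emulated resources are just offsets into the
 * test buffers, so there is nothing to undo; only real mappings are passed
 * on to the kernel's unmap routines.
 */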
void __wrap_devm_memunmap(struct device *dev, void *addr)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);

	if (nfit_res)
		return;
	return devm_memunmap(dev, addr);
}
EXPORT_SYMBOL(__wrap_devm_memunmap);

void __iomem *__wrap_ioremap_nocache(resource_size_t offset, unsigned long size)
{
	return __nfit_test_ioremap(offset, size, ioremap_nocache);
}
EXPORT_SYMBOL(__wrap_ioremap_nocache);

void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size)
{
	return __nfit_test_ioremap(offset, size, ioremap_wc);
}
EXPORT_SYMBOL(__wrap_ioremap_wc);

void __wrap_iounmap(volatile void __iomem *addr)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);

	if (nfit_res)
		return;
	return iounmap(addr);
}
EXPORT_SYMBOL(__wrap_iounmap);

void __wrap_memunmap(void *addr)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);

	if (nfit_res)
		return;
	return memunmap(addr);
}
EXPORT_SYMBOL(__wrap_memunmap);

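/*
 * Region request/release emulation: requests against an emulated resource
 * are tracked on the resource's own request list rather than in the global
 * iomem tree. nfit_devres_release() is the devres action used for managed
 * (device-scoped) requests, and match() pairs a devres entry with the
 * region start address being released.
 */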
static bool nfit_test_release_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n);

static void nfit_devres_release(struct device *dev, void *data)
{
	struct resource *res = *((struct resource **) data);

	WARN_ON(!nfit_test_release_region(NULL, &iomem_resource, res->start,
			resource_size(res)));
}

static int match(struct device *dev, void *__res, void *match_data)
{
	struct resource *res = *((struct resource **) __res);
	resource_size_t start = *((resource_size_t *) match_data);

	return res->start == start;
}

static bool nfit_test_release_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n)
{
	if (parent == &iomem_resource) {
		struct nfit_test_resource *nfit_res = get_nfit_res(start);

		if (nfit_res) {
			struct nfit_test_request *req;
			struct resource *res = NULL;

			if (dev) {
				devres_release(dev, nfit_devres_release, match,
						&start);
				return true;
			}

			spin_lock(&nfit_res->lock);
			list_for_each_entry(req, &nfit_res->requests, list)
				if (req->res.start == start) {
					res = &req->res;
					list_del(&req->list);
					break;
				}
			spin_unlock(&nfit_res->lock);

			WARN(!res || resource_size(res) != n,
					"%s: start: %llx n: %llx mismatch: %pr\n",
						__func__, start, n, res);
			if (res)
				kfree(req);
			return true;
		}
	}
	return false;
}

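/*
 * Emulate __request_region()/__devm_request_region() for addresses inside a
 * test resource: check that the request fits within the emulated range,
 * reject requests whose start address is already busy, record the request
 * on the resource's list, and attach a devres action when a device is
 * supplied. Anything outside an emulated resource is forwarded to the real
 * resource tree.
 */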
static struct resource *nfit_test_request_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n, const char *name, int flags)
{
	struct nfit_test_resource *nfit_res;

	if (parent == &iomem_resource) {
		nfit_res = get_nfit_res(start);
		if (nfit_res) {
			struct nfit_test_request *req;
			struct resource *res = NULL;

			if (start + n > nfit_res->res.start
					+ resource_size(&nfit_res->res)) {
				pr_debug("%s: start: %llx n: %llx overflow: %pr\n",
						__func__, start, n,
						&nfit_res->res);
				return NULL;
			}

			spin_lock(&nfit_res->lock);
			list_for_each_entry(req, &nfit_res->requests, list)
				if (start == req->res.start) {
					res = &req->res;
					break;
				}
			spin_unlock(&nfit_res->lock);

			if (res) {
				WARN(1, "%pr already busy\n", res);
				return NULL;
			}

			req = kzalloc(sizeof(*req), GFP_KERNEL);
			if (!req)
				return NULL;
			INIT_LIST_HEAD(&req->list);
			res = &req->res;

			res->start = start;
			res->end = start + n - 1;
			res->name = name;
			res->flags = resource_type(parent);
			res->flags |= IORESOURCE_BUSY | flags;
			spin_lock(&nfit_res->lock);
			list_add(&req->list, &nfit_res->requests);
			spin_unlock(&nfit_res->lock);

			if (dev) {
				struct resource **d;

				d = devres_alloc(nfit_devres_release,
						sizeof(struct resource *),
						GFP_KERNEL);
				if (!d)
					return NULL;
				*d = res;
				devres_add(dev, d);
			}

			pr_debug("%s: %pr\n", __func__, res);
			return res;
		}
	}
	if (dev)
		return __devm_request_region(dev, parent, start, n, name);
	return __request_region(parent, start, n, name, flags);
}

struct resource *__wrap___request_region(struct resource *parent,
		resource_size_t start, resource_size_t n, const char *name,
		int flags)
{
	return nfit_test_request_region(NULL, parent, start, n, name, flags);
}
EXPORT_SYMBOL(__wrap___request_region);

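/*
 * Emulated resources are never added to the kernel's resource tree, so
 * insert_resource()/remove_resource() become no-ops (reporting success)
 * for test addresses.
 */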
int __wrap_insert_resource(struct resource *parent, struct resource *res)
{
	if (get_nfit_res(res->start))
		return 0;
	return insert_resource(parent, res);
}
EXPORT_SYMBOL(__wrap_insert_resource);

int __wrap_remove_resource(struct resource *res)
{
	if (get_nfit_res(res->start))
		return 0;
	return remove_resource(res);
}
EXPORT_SYMBOL(__wrap_remove_resource);

struct resource *__wrap___devm_request_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n, const char *name)
{
	if (!dev)
		return NULL;
	return nfit_test_request_region(dev, parent, start, n, name, 0);
}
EXPORT_SYMBOL(__wrap___devm_request_region);

void __wrap___release_region(struct resource *parent, resource_size_t start,
		resource_size_t n)
{
	if (!nfit_test_release_region(NULL, parent, start, n))
		__release_region(parent, start, n);
}
EXPORT_SYMBOL(__wrap___release_region);

void __wrap___devm_release_region(struct device *dev, struct resource *parent,
		resource_size_t start, resource_size_t n)
{
	if (!nfit_test_release_region(dev, parent, start, n))
		__devm_release_region(dev, parent, start, n);
}
EXPORT_SYMBOL(__wrap___devm_release_region);

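/*
 * Intercept ACPI _FIT evaluation for test handles: instead of calling into
 * the firmware, hand back the pre-built ACPI object stashed in the test
 * resource's buffer. All other handles, methods, or calls without a result
 * buffer go to the real acpi_evaluate_object().
 */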
acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path,
		struct acpi_object_list *p, struct acpi_buffer *buf)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) handle);
	union acpi_object **obj;

	if (!nfit_res || strcmp(path, "_FIT") || !buf)
		return acpi_evaluate_object(handle, path, p, buf);

	obj = nfit_res->buf;
	buf->length = sizeof(union acpi_object);
	buf->pointer = *obj;
	return AE_OK;
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_object);

MODULE_LICENSE("GPL v2");