xref: /openbmc/linux/tools/testing/nvdimm/test/iomap.c (revision c8f14e2b)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/memremap.h>
#include <linux/rculist.h>
#include <linux/export.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/mm.h>
#include "nfit_test.h"

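/*
 * Every __wrap_* function in this file stands in for the corresponding
 * kernel symbol at link time (GNU ld --wrap, see the nvdimm test
 * Kbuild). If an address falls inside one of nfit_test's simulated
 * resources, the wrapper services it from the test buffer; otherwise
 * it falls through to the real implementation.
 */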
static LIST_HEAD(iomap_head);

static struct iomap_ops {
	nfit_test_lookup_fn nfit_test_lookup;
	nfit_test_evaluate_dsm_fn evaluate_dsm;
	struct list_head list;
} iomap_ops = {
	.list = LIST_HEAD_INIT(iomap_ops.list),
};

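/*
 * nfit_test_setup()/nfit_test_teardown() are called by the nfit_test
 * module to publish and retract its resource-lookup and _DSM-evaluation
 * callbacks; teardown waits out in-flight RCU readers before the
 * callbacks can go away.
 */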
void nfit_test_setup(nfit_test_lookup_fn lookup,
		nfit_test_evaluate_dsm_fn evaluate)
{
	iomap_ops.nfit_test_lookup = lookup;
	iomap_ops.evaluate_dsm = evaluate;
	list_add_rcu(&iomap_ops.list, &iomap_head);
}
EXPORT_SYMBOL(nfit_test_setup);

void nfit_test_teardown(void)
{
	list_del_rcu(&iomap_ops.list);
	synchronize_rcu();
}
EXPORT_SYMBOL(nfit_test_teardown);

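/*
 * Look up the simulated resource containing @resource, if any. The
 * caller must be in an RCU read-side section; get_nfit_res() below
 * wraps the lookup in one.
 */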
static struct nfit_test_resource *__get_nfit_res(resource_size_t resource)
{
	struct iomap_ops *ops;

	ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
	if (ops)
		return ops->nfit_test_lookup(resource);
	return NULL;
}

struct nfit_test_resource *get_nfit_res(resource_size_t resource)
{
	struct nfit_test_resource *res;

	rcu_read_lock();
	res = __get_nfit_res(resource);
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(get_nfit_res);

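/*
 * Common helper for the ioremap-family wrappers: a simulated region
 * resolves to the matching offset within the test buffer; anything
 * else is passed to the real mapping function.
 */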
static void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
		void __iomem *(*fallback_fn)(resource_size_t, unsigned long))
{
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);

	if (nfit_res)
		return (void __iomem *) nfit_res->buf + offset
			- nfit_res->res.start;
	return fallback_fn(offset, size);
}

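/*
 * The devm_ioremap()/devm_memremap() wrappers follow the same pattern:
 * simulated ranges map to the test buffer, everything else is
 * device-managed as usual.
 */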
void __iomem *__wrap_devm_ioremap(struct device *dev,
		resource_size_t offset, unsigned long size)
{
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);

	if (nfit_res)
		return (void __iomem *) nfit_res->buf + offset
			- nfit_res->res.start;
	return devm_ioremap(dev, offset, size);
}
EXPORT_SYMBOL(__wrap_devm_ioremap);

void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);

	if (nfit_res)
		return nfit_res->buf + offset - nfit_res->res.start;
	return devm_memremap(dev, offset, size, flags);
}
EXPORT_SYMBOL(__wrap_devm_memremap);

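/*
 * Teardown for the emulated devm_memremap_pages() below: kill the
 * percpu ref and wait for dev_pagemap_percpu_release() to signal the
 * final reference drop.
 */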
static void nfit_test_kill(void *_pgmap)
{
	struct dev_pagemap *pgmap = _pgmap;

	WARN_ON(!pgmap);

	percpu_ref_kill(&pgmap->ref);

	wait_for_completion(&pgmap->done);
	percpu_ref_exit(&pgmap->ref);
}

static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);

	complete(&pgmap->done);
}

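/*
 * For a simulated range, skip real page-map setup and hand back the
 * test buffer, but still initialize the pgmap ref/completion machinery
 * that the caller and nfit_test_kill() expect.
 */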
void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	int error;
	resource_size_t offset = pgmap->range.start;
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);

	if (!nfit_res)
		return devm_memremap_pages(dev, pgmap);

	init_completion(&pgmap->done);
	error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0,
				GFP_KERNEL);
	if (error)
		return ERR_PTR(error);

	error = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
	if (error)
		return ERR_PTR(error);
	return nfit_res->buf + offset - nfit_res->res.start;
}
EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);

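/*
 * Simulated ranges are not backed by a real devm_memremap_pages() page
 * map, so drop PFN_MAP before constructing the pfn_t.
 */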
pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
{
	struct nfit_test_resource *nfit_res = get_nfit_res(addr);

	if (nfit_res)
		flags &= ~PFN_MAP;
	return phys_to_pfn_t(addr, flags);
}
EXPORT_SYMBOL(__wrap_phys_to_pfn_t);

void *__wrap_memremap(resource_size_t offset, size_t size,
		unsigned long flags)
{
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);

	if (nfit_res)
		return nfit_res->buf + offset - nfit_res->res.start;
	return memremap(offset, size, flags);
}
EXPORT_SYMBOL(__wrap_memremap);

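/*
 * The unmap wrappers below (devm_memunmap, iounmap, memunmap) are
 * no-ops for simulated addresses: the backing buffers belong to
 * nfit_test, not to the caller.
 */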
void __wrap_devm_memunmap(struct device *dev, void *addr)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);

	if (nfit_res)
		return;
	return devm_memunmap(dev, addr);
}
EXPORT_SYMBOL(__wrap_devm_memunmap);

void __iomem *__wrap_ioremap(resource_size_t offset, unsigned long size)
{
	return __nfit_test_ioremap(offset, size, ioremap);
}
EXPORT_SYMBOL(__wrap_ioremap);

void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size)
{
	return __nfit_test_ioremap(offset, size, ioremap_wc);
}
EXPORT_SYMBOL(__wrap_ioremap_wc);

void __wrap_iounmap(volatile void __iomem *addr)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);

	if (nfit_res)
		return;
	return iounmap(addr);
}
EXPORT_SYMBOL(__wrap_iounmap);

void __wrap_memunmap(void *addr)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);

	if (nfit_res)
		return;
	return memunmap(addr);
}
EXPORT_SYMBOL(__wrap_memunmap);

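/*
 * I/O resource emulation: requests against simulated ranges are
 * tracked on a per-resource list under nfit_res->lock instead of in
 * the global iomem_resource tree.
 */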
static bool nfit_test_release_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n);

static void nfit_devres_release(struct device *dev, void *data)
{
	struct resource *res = *((struct resource **) data);

	WARN_ON(!nfit_test_release_region(NULL, &iomem_resource, res->start,
			resource_size(res)));
}

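/* devres match callback: find the tracked region by start address. */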
static int match(struct device *dev, void *__res, void *match_data)
{
	struct resource *res = *((struct resource **) __res);
	resource_size_t start = *((resource_size_t *) match_data);

	return res->start == start;
}

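/*
 * Release a tracked region. When a device is supplied, route through
 * devres_release() so that nfit_devres_release() above performs the
 * removal; otherwise drop the request from the resource's list
 * directly. Returns false if the range is not under test control.
 */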
static bool nfit_test_release_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n)
{
	if (parent == &iomem_resource) {
		struct nfit_test_resource *nfit_res = get_nfit_res(start);

		if (nfit_res) {
			struct nfit_test_request *req;
			struct resource *res = NULL;

			if (dev) {
				devres_release(dev, nfit_devres_release, match,
						&start);
				return true;
			}

			spin_lock(&nfit_res->lock);
			list_for_each_entry(req, &nfit_res->requests, list)
				if (req->res.start == start) {
					res = &req->res;
					list_del(&req->list);
					break;
				}
			spin_unlock(&nfit_res->lock);

			WARN(!res || resource_size(res) != n,
					"%s: start: %llx n: %llx mismatch: %pr\n",
						__func__, start, n, res);
			if (res)
				kfree(req);
			return true;
		}
	}
	return false;
}

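/*
 * Claim a range inside a simulated resource. The range must fit within
 * the resource and must not start at an already-requested address; on
 * success a tracking entry is queued (plus a devres node for
 * device-managed requests, so the region is released with the device).
 */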
static struct resource *nfit_test_request_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n, const char *name, int flags)
{
	struct nfit_test_resource *nfit_res;

	if (parent == &iomem_resource) {
		nfit_res = get_nfit_res(start);
		if (nfit_res) {
			struct nfit_test_request *req;
			struct resource *res = NULL;

			if (start + n > nfit_res->res.start
					+ resource_size(&nfit_res->res)) {
				pr_debug("%s: start: %llx n: %llx overflow: %pr\n",
						__func__, start, n,
						&nfit_res->res);
				return NULL;
			}

			spin_lock(&nfit_res->lock);
			list_for_each_entry(req, &nfit_res->requests, list)
				if (start == req->res.start) {
					res = &req->res;
					break;
				}
			spin_unlock(&nfit_res->lock);

			if (res) {
				WARN(1, "%pr already busy\n", res);
				return NULL;
			}

			req = kzalloc(sizeof(*req), GFP_KERNEL);
			if (!req)
				return NULL;
			INIT_LIST_HEAD(&req->list);
			res = &req->res;

			res->start = start;
			res->end = start + n - 1;
			res->name = name;
			res->flags = resource_type(parent);
			res->flags |= IORESOURCE_BUSY | flags;
			spin_lock(&nfit_res->lock);
			list_add(&req->list, &nfit_res->requests);
			spin_unlock(&nfit_res->lock);

			if (dev) {
				struct resource **d;

				d = devres_alloc(nfit_devres_release,
						sizeof(struct resource *),
						GFP_KERNEL);
				if (!d)
					return NULL;
				*d = res;
				devres_add(dev, d);
			}

			pr_debug("%s: %pr\n", __func__, res);
			return res;
		}
	}
	if (dev)
		return __devm_request_region(dev, parent, start, n, name);
	return __request_region(parent, start, n, name, flags);
}

struct resource *__wrap___request_region(struct resource *parent,
		resource_size_t start, resource_size_t n, const char *name,
		int flags)
{
	return nfit_test_request_region(NULL, parent, start, n, name, flags);
}
EXPORT_SYMBOL(__wrap___request_region);

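/*
 * insert_resource()/remove_resource() calls against simulated ranges
 * report success without touching the resource tree; the test harness
 * owns those ranges.
 */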
int __wrap_insert_resource(struct resource *parent, struct resource *res)
{
	if (get_nfit_res(res->start))
		return 0;
	return insert_resource(parent, res);
}
EXPORT_SYMBOL(__wrap_insert_resource);

int __wrap_remove_resource(struct resource *res)
{
	if (get_nfit_res(res->start))
		return 0;
	return remove_resource(res);
}
EXPORT_SYMBOL(__wrap_remove_resource);

struct resource *__wrap___devm_request_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n, const char *name)
{
	if (!dev)
		return NULL;
	return nfit_test_request_region(dev, parent, start, n, name, 0);
}
EXPORT_SYMBOL(__wrap___devm_request_region);

void __wrap___release_region(struct resource *parent, resource_size_t start,
		resource_size_t n)
{
	if (!nfit_test_release_region(NULL, parent, start, n))
		__release_region(parent, start, n);
}
EXPORT_SYMBOL(__wrap___release_region);

void __wrap___devm_release_region(struct device *dev, struct resource *parent,
		resource_size_t start, resource_size_t n)
{
	if (!nfit_test_release_region(dev, parent, start, n))
		__devm_release_region(dev, parent, start, n);
}
EXPORT_SYMBOL(__wrap___devm_release_region);

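/*
 * Intercept _FIT evaluation on a simulated handle and return the
 * test-provided table buffer; everything else goes to the real
 * acpi_evaluate_object().
 */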
acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path,
		struct acpi_object_list *p, struct acpi_buffer *buf)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) handle);
	union acpi_object **obj;

	if (!nfit_res || strcmp(path, "_FIT") || !buf)
		return acpi_evaluate_object(handle, path, p, buf);

	obj = nfit_res->buf;
	buf->length = sizeof(union acpi_object);
	buf->pointer = *obj;
	return AE_OK;
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_object);

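/*
 * _DSM calls go to the registered test evaluator first; an ERR_PTR
 * result (including when no evaluator is registered) falls back to the
 * real acpi_evaluate_dsm().
 */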
union acpi_object *__wrap_acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
		u64 rev, u64 func, union acpi_object *argv4)
{
	union acpi_object *obj = ERR_PTR(-ENXIO);
	struct iomap_ops *ops;

	rcu_read_lock();
	ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
	if (ops)
		obj = ops->evaluate_dsm(handle, guid, rev, func, argv4);
	rcu_read_unlock();

	if (IS_ERR(obj))
		return acpi_evaluate_dsm(handle, guid, rev, func, argv4);
	return obj;
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_dsm);

MODULE_LICENSE("GPL v2");