xref: /openbmc/linux/tools/testing/nvdimm/test/iomap.c (revision cd238eff)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
4  */
5 #include <linux/memremap.h>
6 #include <linux/rculist.h>
7 #include <linux/export.h>
8 #include <linux/ioport.h>
9 #include <linux/module.h>
10 #include <linux/types.h>
11 #include <linux/pfn_t.h>
12 #include <linux/acpi.h>
13 #include <linux/io.h>
14 #include <linux/mm.h>
15 #include "nfit_test.h"
16 
/* RCU-protected registration list; holds at most the single iomap_ops below */
static LIST_HEAD(iomap_head);

/*
 * Callbacks supplied by the nfit_test module: a lookup from physical
 * address to emulated resource, and a _DSM evaluation hook.  Published
 * on iomap_head by nfit_test_setup() and consumed under RCU by the
 * __wrap_* interposers in this file.
 */
static struct iomap_ops {
	nfit_test_lookup_fn nfit_test_lookup;
	nfit_test_evaluate_dsm_fn evaluate_dsm;
	struct list_head list;
} iomap_ops = {
	.list = LIST_HEAD_INIT(iomap_ops.list),
};
26 
/*
 * Register the test module's lookup and DSM callbacks.  The struct is
 * fully initialized before list_add_rcu() publishes it, so concurrent
 * RCU readers never observe a half-filled iomap_ops.
 */
void nfit_test_setup(nfit_test_lookup_fn lookup,
		nfit_test_evaluate_dsm_fn evaluate)
{
	iomap_ops.nfit_test_lookup = lookup;
	iomap_ops.evaluate_dsm = evaluate;
	list_add_rcu(&iomap_ops.list, &iomap_head);
}
EXPORT_SYMBOL(nfit_test_setup);
35 
/*
 * Unregister the callbacks.  synchronize_rcu() waits for all in-flight
 * RCU readers (get_nfit_res(), __wrap_acpi_evaluate_dsm()) to finish
 * before the caller may unload the module that owns the callbacks.
 */
void nfit_test_teardown(void)
{
	list_del_rcu(&iomap_ops.list);
	synchronize_rcu();
}
EXPORT_SYMBOL(nfit_test_teardown);
42 
43 static struct nfit_test_resource *__get_nfit_res(resource_size_t resource)
44 {
45 	struct iomap_ops *ops;
46 
47 	ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
48 	if (ops)
49 		return ops->nfit_test_lookup(resource);
50 	return NULL;
51 }
52 
53 struct nfit_test_resource *get_nfit_res(resource_size_t resource)
54 {
55 	struct nfit_test_resource *res;
56 
57 	rcu_read_lock();
58 	res = __get_nfit_res(resource);
59 	rcu_read_unlock();
60 
61 	return res;
62 }
63 EXPORT_SYMBOL(get_nfit_res);
64 
65 void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
66 		void __iomem *(*fallback_fn)(resource_size_t, unsigned long))
67 {
68 	struct nfit_test_resource *nfit_res = get_nfit_res(offset);
69 
70 	if (nfit_res)
71 		return (void __iomem *) nfit_res->buf + offset
72 			- nfit_res->res.start;
73 	return fallback_fn(offset, size);
74 }
75 
76 void __iomem *__wrap_devm_ioremap_nocache(struct device *dev,
77 		resource_size_t offset, unsigned long size)
78 {
79 	struct nfit_test_resource *nfit_res = get_nfit_res(offset);
80 
81 	if (nfit_res)
82 		return (void __iomem *) nfit_res->buf + offset
83 			- nfit_res->res.start;
84 	return devm_ioremap_nocache(dev, offset, size);
85 }
86 EXPORT_SYMBOL(__wrap_devm_ioremap_nocache);
87 
88 void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
89 		size_t size, unsigned long flags)
90 {
91 	struct nfit_test_resource *nfit_res = get_nfit_res(offset);
92 
93 	if (nfit_res)
94 		return nfit_res->buf + offset - nfit_res->res.start;
95 	return devm_memremap(dev, offset, size, flags);
96 }
97 EXPORT_SYMBOL(__wrap_devm_memremap);
98 
/*
 * devm action registered by __wrap_devm_memremap_pages() for emulated
 * ranges: invoke the pagemap's kill() callback on device teardown, as
 * the real devm_memremap_pages() path would.
 */
static void nfit_test_kill(void *_pgmap)
{
	struct dev_pagemap *pgmap = _pgmap;

	pgmap->kill(pgmap->ref);
}
105 
106 void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
107 {
108 	resource_size_t offset = pgmap->res.start;
109 	struct nfit_test_resource *nfit_res = get_nfit_res(offset);
110 
111 	if (nfit_res) {
112 		int rc;
113 
114 		rc = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
115 		if (rc)
116 			return ERR_PTR(rc);
117 		return nfit_res->buf + offset - nfit_res->res.start;
118 	}
119 	return devm_memremap_pages(dev, pgmap);
120 }
121 EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
122 
123 pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
124 {
125 	struct nfit_test_resource *nfit_res = get_nfit_res(addr);
126 
127 	if (nfit_res)
128 		flags &= ~PFN_MAP;
129         return phys_to_pfn_t(addr, flags);
130 }
131 EXPORT_SYMBOL(__wrap_phys_to_pfn_t);
132 
133 void *__wrap_memremap(resource_size_t offset, size_t size,
134 		unsigned long flags)
135 {
136 	struct nfit_test_resource *nfit_res = get_nfit_res(offset);
137 
138 	if (nfit_res)
139 		return nfit_res->buf + offset - nfit_res->res.start;
140 	return memremap(offset, size, flags);
141 }
142 EXPORT_SYMBOL(__wrap_memremap);
143 
/*
 * Interposed devm_memunmap(): emulated mappings point into
 * nfit_res->buf, which the test harness owns, so there is nothing to
 * unmap; only real mappings reach devm_memunmap().
 */
void __wrap_devm_memunmap(struct device *dev, void *addr)
{
	if (!get_nfit_res((long) addr))
		devm_memunmap(dev, addr);
}
EXPORT_SYMBOL(__wrap_devm_memunmap);
153 
/* Interposed ioremap_nocache(): route through the common test helper. */
void __iomem *__wrap_ioremap_nocache(resource_size_t offset, unsigned long size)
{
	return __nfit_test_ioremap(offset, size, ioremap_nocache);
}
EXPORT_SYMBOL(__wrap_ioremap_nocache);
159 
/* Interposed ioremap_wc(): route through the common test helper. */
void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size)
{
	return __nfit_test_ioremap(offset, size, ioremap_wc);
}
EXPORT_SYMBOL(__wrap_ioremap_wc);
165 
166 void __wrap_iounmap(volatile void __iomem *addr)
167 {
168 	struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);
169 	if (nfit_res)
170 		return;
171 	return iounmap(addr);
172 }
173 EXPORT_SYMBOL(__wrap_iounmap);
174 
/*
 * Interposed memunmap(): nothing to do for pointers into the test
 * buffer; real mappings are released by the genuine memunmap().
 */
void __wrap_memunmap(void *addr)
{
	if (!get_nfit_res((long) addr))
		memunmap(addr);
}
EXPORT_SYMBOL(__wrap_memunmap);
184 
static bool nfit_test_release_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n);

/*
 * devres destructor for regions requested via nfit_test_request_region()
 * with a device: releases the tracked region.  Passes dev == NULL so
 * nfit_test_release_region() takes the direct-release path rather than
 * recursing back into devres.  The WARN_ON fires if the region was not
 * found (double release or list corruption).
 */
static void nfit_devres_release(struct device *dev, void *data)
{
	struct resource *res = *((struct resource **) data);

	WARN_ON(!nfit_test_release_region(NULL, &iomem_resource, res->start,
			resource_size(res)));
}
196 
197 static int match(struct device *dev, void *__res, void *match_data)
198 {
199 	struct resource *res = *((struct resource **) __res);
200 	resource_size_t start = *((resource_size_t *) match_data);
201 
202 	return res->start == start;
203 }
204 
/*
 * Emulated region release.  Returns true when the range was handled by
 * the test harness (the callers then skip the real __release_region),
 * false when it belongs to the genuine resource tree.
 *
 * Only ranges under the top-level iomem_resource can be emulated.  With
 * a device, release is delegated to devres_release(), whose destructor
 * (nfit_devres_release) re-enters this function with dev == NULL; the
 * NULL-dev path unlinks the matching request under nfit_res->lock and
 * frees it.
 */
static bool nfit_test_release_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n)
{
	if (parent == &iomem_resource) {
		struct nfit_test_resource *nfit_res = get_nfit_res(start);

		if (nfit_res) {
			struct nfit_test_request *req;
			struct resource *res = NULL;

			if (dev) {
				/* destructor performs the actual unlink/free */
				devres_release(dev, nfit_devres_release, match,
						&start);
				return true;
			}

			spin_lock(&nfit_res->lock);
			list_for_each_entry(req, &nfit_res->requests, list)
				if (req->res.start == start) {
					res = &req->res;
					list_del(&req->list);
					break;
				}
			spin_unlock(&nfit_res->lock);

			/* not found, or caller's size disagrees with the request */
			WARN(!res || resource_size(res) != n,
					"%s: start: %llx n: %llx mismatch: %pr\n",
						__func__, start, n, res);
			/* res aliases &req->res, so freeing req frees both */
			if (res)
				kfree(req);
			return true;
		}
	}
	return false;
}
241 
242 static struct resource *nfit_test_request_region(struct device *dev,
243 		struct resource *parent, resource_size_t start,
244 		resource_size_t n, const char *name, int flags)
245 {
246 	struct nfit_test_resource *nfit_res;
247 
248 	if (parent == &iomem_resource) {
249 		nfit_res = get_nfit_res(start);
250 		if (nfit_res) {
251 			struct nfit_test_request *req;
252 			struct resource *res = NULL;
253 
254 			if (start + n > nfit_res->res.start
255 					+ resource_size(&nfit_res->res)) {
256 				pr_debug("%s: start: %llx n: %llx overflow: %pr\n",
257 						__func__, start, n,
258 						&nfit_res->res);
259 				return NULL;
260 			}
261 
262 			spin_lock(&nfit_res->lock);
263 			list_for_each_entry(req, &nfit_res->requests, list)
264 				if (start == req->res.start) {
265 					res = &req->res;
266 					break;
267 				}
268 			spin_unlock(&nfit_res->lock);
269 
270 			if (res) {
271 				WARN(1, "%pr already busy\n", res);
272 				return NULL;
273 			}
274 
275 			req = kzalloc(sizeof(*req), GFP_KERNEL);
276 			if (!req)
277 				return NULL;
278 			INIT_LIST_HEAD(&req->list);
279 			res = &req->res;
280 
281 			res->start = start;
282 			res->end = start + n - 1;
283 			res->name = name;
284 			res->flags = resource_type(parent);
285 			res->flags |= IORESOURCE_BUSY | flags;
286 			spin_lock(&nfit_res->lock);
287 			list_add(&req->list, &nfit_res->requests);
288 			spin_unlock(&nfit_res->lock);
289 
290 			if (dev) {
291 				struct resource **d;
292 
293 				d = devres_alloc(nfit_devres_release,
294 						sizeof(struct resource *),
295 						GFP_KERNEL);
296 				if (!d)
297 					return NULL;
298 				*d = res;
299 				devres_add(dev, d);
300 			}
301 
302 			pr_debug("%s: %pr\n", __func__, res);
303 			return res;
304 		}
305 	}
306 	if (dev)
307 		return __devm_request_region(dev, parent, start, n, name);
308 	return __request_region(parent, start, n, name, flags);
309 }
310 
/* Interposed __request_region(): non-devres entry point, dev == NULL. */
struct resource *__wrap___request_region(struct resource *parent,
		resource_size_t start, resource_size_t n, const char *name,
		int flags)
{
	return nfit_test_request_region(NULL, parent, start, n, name, flags);
}
EXPORT_SYMBOL(__wrap___request_region);
318 
319 int __wrap_insert_resource(struct resource *parent, struct resource *res)
320 {
321 	if (get_nfit_res(res->start))
322 		return 0;
323 	return insert_resource(parent, res);
324 }
325 EXPORT_SYMBOL(__wrap_insert_resource);
326 
327 int __wrap_remove_resource(struct resource *res)
328 {
329 	if (get_nfit_res(res->start))
330 		return 0;
331 	return remove_resource(res);
332 }
333 EXPORT_SYMBOL(__wrap_remove_resource);
334 
335 struct resource *__wrap___devm_request_region(struct device *dev,
336 		struct resource *parent, resource_size_t start,
337 		resource_size_t n, const char *name)
338 {
339 	if (!dev)
340 		return NULL;
341 	return nfit_test_request_region(dev, parent, start, n, name, 0);
342 }
343 EXPORT_SYMBOL(__wrap___devm_request_region);
344 
/*
 * Interposed __release_region(): hand the range to the test harness
 * first; only ranges it does not own reach the real release.
 */
void __wrap___release_region(struct resource *parent, resource_size_t start,
		resource_size_t n)
{
	if (!nfit_test_release_region(NULL, parent, start, n))
		__release_region(parent, start, n);
}
EXPORT_SYMBOL(__wrap___release_region);
352 
/*
 * Interposed __devm_release_region(): devres-managed variant of the
 * above; falls through to the real call for non-emulated ranges.
 */
void __wrap___devm_release_region(struct device *dev, struct resource *parent,
		resource_size_t start, resource_size_t n)
{
	if (!nfit_test_release_region(dev, parent, start, n))
		__devm_release_region(dev, parent, start, n);
}
EXPORT_SYMBOL(__wrap___devm_release_region);
360 
/*
 * Interposed acpi_evaluate_object(): "_FIT" evaluations on an emulated
 * handle return the object stashed by the test module in nfit_res->buf;
 * all other evaluations go to the real ACPICA implementation.
 *
 * NOTE(review): buf->length is set to a single union acpi_object even
 * though _FIT payloads are larger -- presumably the consumer follows
 * the object's internal buffer pointer instead; confirm against the
 * nfit_test fixture that fills this buf.
 */
acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path,
		struct acpi_object_list *p, struct acpi_buffer *buf)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) handle);
	union acpi_object **obj;

	if (!nfit_res || strcmp(path, "_FIT") || !buf)
		return acpi_evaluate_object(handle, path, p, buf);

	obj = nfit_res->buf;
	buf->length = sizeof(union acpi_object);
	buf->pointer = *obj;
	return AE_OK;
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_object);
376 
/*
 * Interposed acpi_evaluate_dsm(): offer the call to the registered test
 * module's evaluate_dsm hook under RCU.  If no module is registered, or
 * the hook declines by returning an ERR_PTR, fall back to the real
 * acpi_evaluate_dsm().  obj starts as ERR_PTR(-ENXIO) so the no-ops
 * case also falls through.
 */
union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
		u64 rev, u64 func, union acpi_object *argv4)
{
	union acpi_object *obj = ERR_PTR(-ENXIO);
	struct iomap_ops *ops;

	rcu_read_lock();
	ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
	if (ops)
		obj = ops->evaluate_dsm(handle, guid, rev, func, argv4);
	rcu_read_unlock();

	if (IS_ERR(obj))
		return acpi_evaluate_dsm(handle, guid, rev, func, argv4);
	return obj;
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_dsm);
394 
395 MODULE_LICENSE("GPL v2");
396