1 // SPDX-License-Identifier: GPL-2.0-only
2 /**
3  * imr.c -- Intel Isolated Memory Region driver
4  *
5  * Copyright(c) 2013 Intel Corporation.
6  * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
7  *
8  * IMR registers define an isolated region of memory that can
9  * be masked to prohibit certain system agents from accessing memory.
10  * When a device behind a masked port performs an access - snooped or
11  * not, an IMR may optionally prevent that transaction from changing
12  * the state of memory or from getting correct data in response to the
13  * operation.
14  *
15  * Write data will be dropped and reads will return 0xFFFFFFFF, the
16  * system will reset and system BIOS will print out an error message to
17  * inform the user that an IMR has been violated.
18  *
19  * This code is based on the Linux MTRR code and reference code from
20  * Intel's Quark BSP EFI, Linux and grub code.
21  *
22  * See quark-x1000-datasheet.pdf for register definitions.
23  * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/quark-x1000-datasheet.pdf
24  */
25 
26 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27 
28 #include <asm-generic/sections.h>
29 #include <asm/cpu_device_id.h>
30 #include <asm/imr.h>
31 #include <asm/iosf_mbi.h>
32 #include <linux/debugfs.h>
33 #include <linux/init.h>
34 #include <linux/mm.h>
35 #include <linux/types.h>
36 
/* Per-SoC IMR driver state; a single instance (imr_dev) exists. */
struct imr_device {
	struct dentry	*file;		/* debugfs "imr_state" file */
	bool		init;		/* set by imr_init(); checked before any IMR op */
	struct mutex	lock;		/* serialises IMR register read/write sequences */
	int		max_imr;	/* number of IMRs provided by this SoC */
	int		reg_base;	/* IOSF-MBI register offset of IMR index 0 */
};

static struct imr_device imr_dev;
46 
47 /*
48  * IMR read/write mask control registers.
49  * See quark-x1000-datasheet.pdf sections 12.7.4.5 and 12.7.4.6 for
50  * bit definitions.
51  *
 * addr_lo
53  * 31		Lock bit
54  * 30:24	Reserved
55  * 23:2		1 KiB aligned lo address
56  * 1:0		Reserved
57  *
58  * addr_hi
59  * 31:24	Reserved
60  * 23:2		1 KiB aligned hi address
61  * 1:0		Reserved
62  */
/* addr_lo bit 31: locked IMRs are skipped on removal (see __imr_remove_range). */
#define IMR_LOCK	BIT(31)

/* In-memory copy of the four consecutive IOSF-MBI registers describing one IMR. */
struct imr_regs {
	u32 addr_lo;	/* 1 KiB aligned lo address, plus IMR_LOCK bit */
	u32 addr_hi;	/* 1 KiB aligned hi address */
	u32 rmask;	/* read access mask */
	u32 wmask;	/* write access mask */
};

#define IMR_NUM_REGS	(sizeof(struct imr_regs)/sizeof(u32))
#define IMR_SHIFT	8
/* Convert between an IMR register address field and a physical address. */
#define imr_to_phys(x)	((x) << IMR_SHIFT)
#define phys_to_imr(x)	((x) >> IMR_SHIFT)
76 
77 /**
78  * imr_is_enabled - true if an IMR is enabled false otherwise.
79  *
80  * Determines if an IMR is enabled based on address range and read/write
81  * mask. An IMR set with an address range set to zero and a read/write
82  * access mask set to all is considered to be disabled. An IMR in any
83  * other state - for example set to zero but without read/write access
84  * all is considered to be enabled. This definition of disabled is how
85  * firmware switches off an IMR and is maintained in kernel for
86  * consistency.
87  *
88  * @imr:	pointer to IMR descriptor.
89  * @return:	true if IMR enabled false if disabled.
90  */
91 static inline int imr_is_enabled(struct imr_regs *imr)
92 {
93 	return !(imr->rmask == IMR_READ_ACCESS_ALL &&
94 		 imr->wmask == IMR_WRITE_ACCESS_ALL &&
95 		 imr_to_phys(imr->addr_lo) == 0 &&
96 		 imr_to_phys(imr->addr_hi) == 0);
97 }
98 
99 /**
100  * imr_read - read an IMR at a given index.
101  *
102  * Requires caller to hold imr mutex.
103  *
104  * @idev:	pointer to imr_device structure.
105  * @imr_id:	IMR entry to read.
106  * @imr:	IMR structure representing address and access masks.
107  * @return:	0 on success or error code passed from mbi_iosf on failure.
108  */
109 static int imr_read(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
110 {
111 	u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
112 	int ret;
113 
114 	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->addr_lo);
115 	if (ret)
116 		return ret;
117 
118 	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->addr_hi);
119 	if (ret)
120 		return ret;
121 
122 	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->rmask);
123 	if (ret)
124 		return ret;
125 
126 	return iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->wmask);
127 }
128 
129 /**
130  * imr_write - write an IMR at a given index.
131  *
132  * Requires caller to hold imr mutex.
133  * Note lock bits need to be written independently of address bits.
134  *
135  * @idev:	pointer to imr_device structure.
136  * @imr_id:	IMR entry to write.
137  * @imr:	IMR structure representing address and access masks.
138  * @return:	0 on success or error code passed from mbi_iosf on failure.
139  */
140 static int imr_write(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
141 {
142 	unsigned long flags;
143 	u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
144 	int ret;
145 
146 	local_irq_save(flags);
147 
148 	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->addr_lo);
149 	if (ret)
150 		goto failed;
151 
152 	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->addr_hi);
153 	if (ret)
154 		goto failed;
155 
156 	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->rmask);
157 	if (ret)
158 		goto failed;
159 
160 	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->wmask);
161 	if (ret)
162 		goto failed;
163 
164 	local_irq_restore(flags);
165 	return 0;
166 failed:
167 	/*
168 	 * If writing to the IOSF failed then we're in an unknown state,
169 	 * likely a very bad state. An IMR in an invalid state will almost
170 	 * certainly lead to a memory access violation.
171 	 */
172 	local_irq_restore(flags);
173 	WARN(ret, "IOSF-MBI write fail range 0x%08x-0x%08x unreliable\n",
174 	     imr_to_phys(imr->addr_lo), imr_to_phys(imr->addr_hi) + IMR_MASK);
175 
176 	return ret;
177 }
178 
179 /**
180  * imr_dbgfs_state_show - print state of IMR registers.
181  *
182  * @s:		pointer to seq_file for output.
183  * @unused:	unused parameter.
184  * @return:	0 on success or error code passed from mbi_iosf on failure.
185  */
186 static int imr_dbgfs_state_show(struct seq_file *s, void *unused)
187 {
188 	phys_addr_t base;
189 	phys_addr_t end;
190 	int i;
191 	struct imr_device *idev = s->private;
192 	struct imr_regs imr;
193 	size_t size;
194 	int ret = -ENODEV;
195 
196 	mutex_lock(&idev->lock);
197 
198 	for (i = 0; i < idev->max_imr; i++) {
199 
200 		ret = imr_read(idev, i, &imr);
201 		if (ret)
202 			break;
203 
204 		/*
205 		 * Remember to add IMR_ALIGN bytes to size to indicate the
206 		 * inherent IMR_ALIGN size bytes contained in the masked away
207 		 * lower ten bits.
208 		 */
209 		if (imr_is_enabled(&imr)) {
210 			base = imr_to_phys(imr.addr_lo);
211 			end = imr_to_phys(imr.addr_hi) + IMR_MASK;
212 			size = end - base + 1;
213 		} else {
214 			base = 0;
215 			end = 0;
216 			size = 0;
217 		}
218 		seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx "
219 			   "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i,
220 			   &base, &end, size, imr.rmask, imr.wmask,
221 			   imr_is_enabled(&imr) ? "enabled " : "disabled",
222 			   imr.addr_lo & IMR_LOCK ? "locked" : "unlocked");
223 	}
224 
225 	mutex_unlock(&idev->lock);
226 	return ret;
227 }
228 DEFINE_SHOW_ATTRIBUTE(imr_dbgfs_state);
229 
230 /**
231  * imr_debugfs_register - register debugfs hooks.
232  *
233  * @idev:	pointer to imr_device structure.
234  * @return:	0 on success - errno on failure.
235  */
236 static int imr_debugfs_register(struct imr_device *idev)
237 {
238 	idev->file = debugfs_create_file("imr_state", 0444, NULL, idev,
239 					 &imr_dbgfs_state_fops);
240 	return PTR_ERR_OR_ZERO(idev->file);
241 }
242 
243 /**
244  * imr_check_params - check passed address range IMR alignment and non-zero size
245  *
246  * @base:	base address of intended IMR.
247  * @size:	size of intended IMR.
248  * @return:	zero on valid range -EINVAL on unaligned base/size.
249  */
250 static int imr_check_params(phys_addr_t base, size_t size)
251 {
252 	if ((base & IMR_MASK) || (size & IMR_MASK)) {
253 		pr_err("base %pa size 0x%08zx must align to 1KiB\n",
254 			&base, size);
255 		return -EINVAL;
256 	}
257 	if (size == 0)
258 		return -EINVAL;
259 
260 	return 0;
261 }
262 
263 /**
264  * imr_raw_size - account for the IMR_ALIGN bytes that addr_hi appends.
265  *
266  * IMR addr_hi has a built in offset of plus IMR_ALIGN (0x400) bytes from the
267  * value in the register. We need to subtract IMR_ALIGN bytes from input sizes
268  * as a result.
269  *
270  * @size:	input size bytes.
271  * @return:	reduced size.
272  */
273 static inline size_t imr_raw_size(size_t size)
274 {
275 	return size - IMR_ALIGN;
276 }
277 
278 /**
279  * imr_address_overlap - detects an address overlap.
280  *
281  * @addr:	address to check against an existing IMR.
282  * @imr:	imr being checked.
283  * @return:	true for overlap false for no overlap.
284  */
285 static inline int imr_address_overlap(phys_addr_t addr, struct imr_regs *imr)
286 {
287 	return addr >= imr_to_phys(imr->addr_lo) && addr <= imr_to_phys(imr->addr_hi);
288 }
289 
290 /**
291  * imr_add_range - add an Isolated Memory Region.
292  *
293  * @base:	physical base address of region aligned to 1KiB.
294  * @size:	physical size of region in bytes must be aligned to 1KiB.
295  * @read_mask:	read access mask.
296  * @write_mask:	write access mask.
297  * @return:	zero on success or negative value indicating error.
298  */
299 int imr_add_range(phys_addr_t base, size_t size,
300 		  unsigned int rmask, unsigned int wmask)
301 {
302 	phys_addr_t end;
303 	unsigned int i;
304 	struct imr_device *idev = &imr_dev;
305 	struct imr_regs imr;
306 	size_t raw_size;
307 	int reg;
308 	int ret;
309 
310 	if (WARN_ONCE(idev->init == false, "driver not initialized"))
311 		return -ENODEV;
312 
313 	ret = imr_check_params(base, size);
314 	if (ret)
315 		return ret;
316 
317 	/* Tweak the size value. */
318 	raw_size = imr_raw_size(size);
319 	end = base + raw_size;
320 
321 	/*
322 	 * Check for reserved IMR value common to firmware, kernel and grub
323 	 * indicating a disabled IMR.
324 	 */
325 	imr.addr_lo = phys_to_imr(base);
326 	imr.addr_hi = phys_to_imr(end);
327 	imr.rmask = rmask;
328 	imr.wmask = wmask;
329 	if (!imr_is_enabled(&imr))
330 		return -ENOTSUPP;
331 
332 	mutex_lock(&idev->lock);
333 
334 	/*
335 	 * Find a free IMR while checking for an existing overlapping range.
336 	 * Note there's no restriction in silicon to prevent IMR overlaps.
337 	 * For the sake of simplicity and ease in defining/debugging an IMR
338 	 * memory map we exclude IMR overlaps.
339 	 */
340 	reg = -1;
341 	for (i = 0; i < idev->max_imr; i++) {
342 		ret = imr_read(idev, i, &imr);
343 		if (ret)
344 			goto failed;
345 
346 		/* Find overlap @ base or end of requested range. */
347 		ret = -EINVAL;
348 		if (imr_is_enabled(&imr)) {
349 			if (imr_address_overlap(base, &imr))
350 				goto failed;
351 			if (imr_address_overlap(end, &imr))
352 				goto failed;
353 		} else {
354 			reg = i;
355 		}
356 	}
357 
358 	/* Error out if we have no free IMR entries. */
359 	if (reg == -1) {
360 		ret = -ENOMEM;
361 		goto failed;
362 	}
363 
364 	pr_debug("add %d phys %pa-%pa size %zx mask 0x%08x wmask 0x%08x\n",
365 		 reg, &base, &end, raw_size, rmask, wmask);
366 
367 	/* Enable IMR at specified range and access mask. */
368 	imr.addr_lo = phys_to_imr(base);
369 	imr.addr_hi = phys_to_imr(end);
370 	imr.rmask = rmask;
371 	imr.wmask = wmask;
372 
373 	ret = imr_write(idev, reg, &imr);
374 	if (ret < 0) {
375 		/*
376 		 * In the highly unlikely event iosf_mbi_write failed
377 		 * attempt to rollback the IMR setup skipping the trapping
378 		 * of further IOSF write failures.
379 		 */
380 		imr.addr_lo = 0;
381 		imr.addr_hi = 0;
382 		imr.rmask = IMR_READ_ACCESS_ALL;
383 		imr.wmask = IMR_WRITE_ACCESS_ALL;
384 		imr_write(idev, reg, &imr);
385 	}
386 failed:
387 	mutex_unlock(&idev->lock);
388 	return ret;
389 }
390 EXPORT_SYMBOL_GPL(imr_add_range);
391 
392 /**
393  * __imr_remove_range - delete an Isolated Memory Region.
394  *
395  * This function allows you to delete an IMR by its index specified by reg or
396  * by address range specified by base and size respectively. If you specify an
397  * index on its own the base and size parameters are ignored.
398  * imr_remove_range(0, base, size); delete IMR at index 0 base/size ignored.
399  * imr_remove_range(-1, base, size); delete IMR from base to base+size.
400  *
401  * @reg:	imr index to remove.
402  * @base:	physical base address of region aligned to 1 KiB.
403  * @size:	physical size of region in bytes aligned to 1 KiB.
404  * @return:	-EINVAL on invalid range or out or range id
405  *		-ENODEV if reg is valid but no IMR exists or is locked
406  *		0 on success.
407  */
408 static int __imr_remove_range(int reg, phys_addr_t base, size_t size)
409 {
410 	phys_addr_t end;
411 	bool found = false;
412 	unsigned int i;
413 	struct imr_device *idev = &imr_dev;
414 	struct imr_regs imr;
415 	size_t raw_size;
416 	int ret = 0;
417 
418 	if (WARN_ONCE(idev->init == false, "driver not initialized"))
419 		return -ENODEV;
420 
421 	/*
422 	 * Validate address range if deleting by address, else we are
423 	 * deleting by index where base and size will be ignored.
424 	 */
425 	if (reg == -1) {
426 		ret = imr_check_params(base, size);
427 		if (ret)
428 			return ret;
429 	}
430 
431 	/* Tweak the size value. */
432 	raw_size = imr_raw_size(size);
433 	end = base + raw_size;
434 
435 	mutex_lock(&idev->lock);
436 
437 	if (reg >= 0) {
438 		/* If a specific IMR is given try to use it. */
439 		ret = imr_read(idev, reg, &imr);
440 		if (ret)
441 			goto failed;
442 
443 		if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK) {
444 			ret = -ENODEV;
445 			goto failed;
446 		}
447 		found = true;
448 	} else {
449 		/* Search for match based on address range. */
450 		for (i = 0; i < idev->max_imr; i++) {
451 			ret = imr_read(idev, i, &imr);
452 			if (ret)
453 				goto failed;
454 
455 			if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK)
456 				continue;
457 
458 			if ((imr_to_phys(imr.addr_lo) == base) &&
459 			    (imr_to_phys(imr.addr_hi) == end)) {
460 				found = true;
461 				reg = i;
462 				break;
463 			}
464 		}
465 	}
466 
467 	if (!found) {
468 		ret = -ENODEV;
469 		goto failed;
470 	}
471 
472 	pr_debug("remove %d phys %pa-%pa size %zx\n", reg, &base, &end, raw_size);
473 
474 	/* Tear down the IMR. */
475 	imr.addr_lo = 0;
476 	imr.addr_hi = 0;
477 	imr.rmask = IMR_READ_ACCESS_ALL;
478 	imr.wmask = IMR_WRITE_ACCESS_ALL;
479 
480 	ret = imr_write(idev, reg, &imr);
481 
482 failed:
483 	mutex_unlock(&idev->lock);
484 	return ret;
485 }
486 
487 /**
488  * imr_remove_range - delete an Isolated Memory Region by address
489  *
490  * This function allows you to delete an IMR by an address range specified
491  * by base and size respectively.
492  * imr_remove_range(base, size); delete IMR from base to base+size.
493  *
494  * @base:	physical base address of region aligned to 1 KiB.
495  * @size:	physical size of region in bytes aligned to 1 KiB.
496  * @return:	-EINVAL on invalid range or out or range id
497  *		-ENODEV if reg is valid but no IMR exists or is locked
498  *		0 on success.
499  */
500 int imr_remove_range(phys_addr_t base, size_t size)
501 {
502 	return __imr_remove_range(-1, base, size);
503 }
504 EXPORT_SYMBOL_GPL(imr_remove_range);
505 
506 /**
507  * imr_clear - delete an Isolated Memory Region by index
508  *
509  * This function allows you to delete an IMR by an address range specified
510  * by the index of the IMR. Useful for initial sanitization of the IMR
511  * address map.
512  * imr_ge(base, size); delete IMR from base to base+size.
513  *
514  * @reg:	imr index to remove.
515  * @return:	-EINVAL on invalid range or out or range id
516  *		-ENODEV if reg is valid but no IMR exists or is locked
517  *		0 on success.
518  */
519 static inline int imr_clear(int reg)
520 {
521 	return __imr_remove_range(reg, 0, 0);
522 }
523 
524 /**
525  * imr_fixup_memmap - Tear down IMRs used during bootup.
526  *
527  * BIOS and Grub both setup IMRs around compressed kernel, initrd memory
528  * that need to be removed before the kernel hands out one of the IMR
529  * encased addresses to a downstream DMA agent such as the SD or Ethernet.
530  * IMRs on Galileo are setup to immediately reset the system on violation.
531  * As a result if you're running a root filesystem from SD - you'll need
532  * the boot-time IMRs torn down or you'll find seemingly random resets when
533  * using your filesystem.
534  *
535  * @idev:	pointer to imr_device structure.
536  * @return:
537  */
538 static void __init imr_fixup_memmap(struct imr_device *idev)
539 {
540 	phys_addr_t base = virt_to_phys(&_text);
541 	size_t size = virt_to_phys(&__end_rodata) - base;
542 	unsigned long start, end;
543 	int i;
544 	int ret;
545 
546 	/* Tear down all existing unlocked IMRs. */
547 	for (i = 0; i < idev->max_imr; i++)
548 		imr_clear(i);
549 
550 	start = (unsigned long)_text;
551 	end = (unsigned long)__end_rodata - 1;
552 
553 	/*
554 	 * Setup an unlocked IMR around the physical extent of the kernel
555 	 * from the beginning of the .text secton to the end of the
556 	 * .rodata section as one physically contiguous block.
557 	 *
558 	 * We don't round up @size since it is already PAGE_SIZE aligned.
559 	 * See vmlinux.lds.S for details.
560 	 */
561 	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
562 	if (ret < 0) {
563 		pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
564 			size / 1024, start, end);
565 	} else {
566 		pr_info("protecting kernel .text - .rodata: %zu KiB (%lx - %lx)\n",
567 			size / 1024, start, end);
568 	}
569 
570 }
571 
/* CPU match table: this driver only binds on the Quark SoC X1000. */
static const struct x86_cpu_id imr_ids[] __initconst = {
	{ X86_VENDOR_INTEL, 5, 9 },	/* Intel Quark SoC X1000. */
	{}
};
576 
577 /**
578  * imr_init - entry point for IMR driver.
579  *
580  * return: -ENODEV for no IMR support 0 if good to go.
581  */
582 static int __init imr_init(void)
583 {
584 	struct imr_device *idev = &imr_dev;
585 	int ret;
586 
587 	if (!x86_match_cpu(imr_ids) || !iosf_mbi_available())
588 		return -ENODEV;
589 
590 	idev->max_imr = QUARK_X1000_IMR_MAX;
591 	idev->reg_base = QUARK_X1000_IMR_REGBASE;
592 	idev->init = true;
593 
594 	mutex_init(&idev->lock);
595 	ret = imr_debugfs_register(idev);
596 	if (ret != 0)
597 		pr_warn("debugfs register failed!\n");
598 	imr_fixup_memmap(idev);
599 	return 0;
600 }
601 device_initcall(imr_init);
602