xref: /openbmc/linux/arch/x86/kernel/cpu/mtrr/amd.c (revision 42204455)
12ec1df41SThomas Gleixner #include <linux/init.h>
22ec1df41SThomas Gleixner #include <linux/mm.h>
32ec1df41SThomas Gleixner #include <asm/mtrr.h>
42ec1df41SThomas Gleixner #include <asm/msr.h>
52ec1df41SThomas Gleixner 
62ec1df41SThomas Gleixner #include "mtrr.h"
72ec1df41SThomas Gleixner 
82ec1df41SThomas Gleixner static void
92ec1df41SThomas Gleixner amd_get_mtrr(unsigned int reg, unsigned long *base,
102ec1df41SThomas Gleixner 	     unsigned long *size, mtrr_type *type)
112ec1df41SThomas Gleixner {
122ec1df41SThomas Gleixner 	unsigned long low, high;
132ec1df41SThomas Gleixner 
142ec1df41SThomas Gleixner 	rdmsr(MSR_K6_UWCCR, low, high);
152ec1df41SThomas Gleixner 	/* Upper dword is region 1, lower is region 0 */
162ec1df41SThomas Gleixner 	if (reg == 1)
172ec1df41SThomas Gleixner 		low = high;
182ec1df41SThomas Gleixner 	/* The base masks off on the right alignment */
192ec1df41SThomas Gleixner 	*base = (low & 0xFFFE0000) >> PAGE_SHIFT;
202ec1df41SThomas Gleixner 	*type = 0;
212ec1df41SThomas Gleixner 	if (low & 1)
222ec1df41SThomas Gleixner 		*type = MTRR_TYPE_UNCACHABLE;
232ec1df41SThomas Gleixner 	if (low & 2)
242ec1df41SThomas Gleixner 		*type = MTRR_TYPE_WRCOMB;
252ec1df41SThomas Gleixner 	if (!(low & 3)) {
262ec1df41SThomas Gleixner 		*size = 0;
272ec1df41SThomas Gleixner 		return;
282ec1df41SThomas Gleixner 	}
292ec1df41SThomas Gleixner 	/*
302ec1df41SThomas Gleixner 	 * This needs a little explaining. The size is stored as an
312ec1df41SThomas Gleixner 	 * inverted mask of bits of 128K granularity 15 bits long offset
3242204455SJaswinder Singh Rajput 	 * 2 bits.
332ec1df41SThomas Gleixner 	 *
342ec1df41SThomas Gleixner 	 * So to get a size we do invert the mask and add 1 to the lowest
352ec1df41SThomas Gleixner 	 * mask bit (4 as its 2 bits in). This gives us a size we then shift
3642204455SJaswinder Singh Rajput 	 * to turn into 128K blocks.
372ec1df41SThomas Gleixner 	 *
382ec1df41SThomas Gleixner 	 * eg              111 1111 1111 1100      is 512K
392ec1df41SThomas Gleixner 	 *
402ec1df41SThomas Gleixner 	 * invert          000 0000 0000 0011
412ec1df41SThomas Gleixner 	 * +1              000 0000 0000 0100
422ec1df41SThomas Gleixner 	 * *128K   ...
432ec1df41SThomas Gleixner 	 */
442ec1df41SThomas Gleixner 	low = (~low) & 0x1FFFC;
452ec1df41SThomas Gleixner 	*size = (low + 4) << (15 - PAGE_SHIFT);
462ec1df41SThomas Gleixner }
472ec1df41SThomas Gleixner 
4842204455SJaswinder Singh Rajput /**
4942204455SJaswinder Singh Rajput  * amd_set_mtrr - Set variable MTRR register on the local CPU.
5042204455SJaswinder Singh Rajput  *
5142204455SJaswinder Singh Rajput  * @reg The register to set.
5242204455SJaswinder Singh Rajput  * @base The base address of the region.
5342204455SJaswinder Singh Rajput  * @size The size of the region. If this is 0 the region is disabled.
5442204455SJaswinder Singh Rajput  * @type The type of the region.
5542204455SJaswinder Singh Rajput  *
5642204455SJaswinder Singh Rajput  * Returns nothing.
572ec1df41SThomas Gleixner  */
5842204455SJaswinder Singh Rajput static void
5942204455SJaswinder Singh Rajput amd_set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
602ec1df41SThomas Gleixner {
612ec1df41SThomas Gleixner 	u32 regs[2];
622ec1df41SThomas Gleixner 
632ec1df41SThomas Gleixner 	/*
642ec1df41SThomas Gleixner 	 * Low is MTRR0, High MTRR 1
652ec1df41SThomas Gleixner 	 */
662ec1df41SThomas Gleixner 	rdmsr(MSR_K6_UWCCR, regs[0], regs[1]);
672ec1df41SThomas Gleixner 	/*
682ec1df41SThomas Gleixner 	 * Blank to disable
692ec1df41SThomas Gleixner 	 */
7042204455SJaswinder Singh Rajput 	if (size == 0) {
712ec1df41SThomas Gleixner 		regs[reg] = 0;
7242204455SJaswinder Singh Rajput 	} else {
7342204455SJaswinder Singh Rajput 		/*
7442204455SJaswinder Singh Rajput 		 * Set the register to the base, the type (off by one) and an
7542204455SJaswinder Singh Rajput 		 * inverted bitmask of the size The size is the only odd
7642204455SJaswinder Singh Rajput 		 * bit. We are fed say 512K We invert this and we get 111 1111
7742204455SJaswinder Singh Rajput 		 * 1111 1011 but if you subtract one and invert you get the
7842204455SJaswinder Singh Rajput 		 * desired 111 1111 1111 1100 mask
7942204455SJaswinder Singh Rajput 		 *
8042204455SJaswinder Singh Rajput 		 *  But ~(x - 1) == ~x + 1 == -x. Two's complement rocks!
8142204455SJaswinder Singh Rajput 		 */
822ec1df41SThomas Gleixner 		regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC)
832ec1df41SThomas Gleixner 		    | (base << PAGE_SHIFT) | (type + 1);
8442204455SJaswinder Singh Rajput 	}
852ec1df41SThomas Gleixner 
862ec1df41SThomas Gleixner 	/*
872ec1df41SThomas Gleixner 	 * The writeback rule is quite specific. See the manual. Its
882ec1df41SThomas Gleixner 	 * disable local interrupts, write back the cache, set the mtrr
892ec1df41SThomas Gleixner 	 */
902ec1df41SThomas Gleixner 	wbinvd();
912ec1df41SThomas Gleixner 	wrmsr(MSR_K6_UWCCR, regs[0], regs[1]);
922ec1df41SThomas Gleixner }
932ec1df41SThomas Gleixner 
9442204455SJaswinder Singh Rajput static int
9542204455SJaswinder Singh Rajput amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
962ec1df41SThomas Gleixner {
9742204455SJaswinder Singh Rajput 	/*
9842204455SJaswinder Singh Rajput 	 * Apply the K6 block alignment and size rules
9942204455SJaswinder Singh Rajput 	 * In order
10042204455SJaswinder Singh Rajput 	 * o Uncached or gathering only
10142204455SJaswinder Singh Rajput 	 * o 128K or bigger block
10242204455SJaswinder Singh Rajput 	 * o Power of 2 block
10342204455SJaswinder Singh Rajput 	 * o base suitably aligned to the power
1042ec1df41SThomas Gleixner 	 */
1052ec1df41SThomas Gleixner 	if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT))
1062ec1df41SThomas Gleixner 	    || (size & ~(size - 1)) - size || (base & (size - 1)))
1072ec1df41SThomas Gleixner 		return -EINVAL;
1082ec1df41SThomas Gleixner 	return 0;
1092ec1df41SThomas Gleixner }
1102ec1df41SThomas Gleixner 
/*
 * MTRR operations for the AMD K6 family: the two regions held in
 * MSR_K6_UWCCR stand in for the Intel variable-range MTRRs.
 */
static struct mtrr_ops amd_mtrr_ops = {
	.vendor            = X86_VENDOR_AMD,
	.set               = amd_set_mtrr,
	.get               = amd_get_mtrr,
	.get_free_region   = generic_get_free_region,
	.validate_add_page = amd_validate_add_page,
	/* NOTE(review): helper name suggests WC is always reported as
	 * available on this CPU — confirm against mtrr.h. */
	.have_wrcomb       = positive_have_wrcomb,
};
1192ec1df41SThomas Gleixner 
/*
 * amd_init_mtrr - register the AMD K6 MTRR handlers with the core
 * MTRR driver.  Always succeeds.
 */
int __init amd_init_mtrr(void)
{
	set_mtrr_ops(&amd_mtrr_ops);
	return 0;
}
125