xref: /openbmc/linux/arch/x86/kernel/cpu/mtrr/amd.c (revision 2ec1df41)
12ec1df41SThomas Gleixner #include <linux/init.h>
22ec1df41SThomas Gleixner #include <linux/mm.h>
32ec1df41SThomas Gleixner #include <asm/mtrr.h>
42ec1df41SThomas Gleixner #include <asm/msr.h>
52ec1df41SThomas Gleixner 
62ec1df41SThomas Gleixner #include "mtrr.h"
72ec1df41SThomas Gleixner 
82ec1df41SThomas Gleixner static void
92ec1df41SThomas Gleixner amd_get_mtrr(unsigned int reg, unsigned long *base,
102ec1df41SThomas Gleixner 	     unsigned long *size, mtrr_type * type)
112ec1df41SThomas Gleixner {
122ec1df41SThomas Gleixner 	unsigned long low, high;
132ec1df41SThomas Gleixner 
142ec1df41SThomas Gleixner 	rdmsr(MSR_K6_UWCCR, low, high);
152ec1df41SThomas Gleixner 	/*  Upper dword is region 1, lower is region 0  */
162ec1df41SThomas Gleixner 	if (reg == 1)
172ec1df41SThomas Gleixner 		low = high;
182ec1df41SThomas Gleixner 	/*  The base masks off on the right alignment  */
192ec1df41SThomas Gleixner 	*base = (low & 0xFFFE0000) >> PAGE_SHIFT;
202ec1df41SThomas Gleixner 	*type = 0;
212ec1df41SThomas Gleixner 	if (low & 1)
222ec1df41SThomas Gleixner 		*type = MTRR_TYPE_UNCACHABLE;
232ec1df41SThomas Gleixner 	if (low & 2)
242ec1df41SThomas Gleixner 		*type = MTRR_TYPE_WRCOMB;
252ec1df41SThomas Gleixner 	if (!(low & 3)) {
262ec1df41SThomas Gleixner 		*size = 0;
272ec1df41SThomas Gleixner 		return;
282ec1df41SThomas Gleixner 	}
292ec1df41SThomas Gleixner 	/*
302ec1df41SThomas Gleixner 	 *  This needs a little explaining. The size is stored as an
312ec1df41SThomas Gleixner 	 *  inverted mask of bits of 128K granularity 15 bits long offset
322ec1df41SThomas Gleixner 	 *  2 bits
332ec1df41SThomas Gleixner 	 *
342ec1df41SThomas Gleixner 	 *  So to get a size we do invert the mask and add 1 to the lowest
352ec1df41SThomas Gleixner 	 *  mask bit (4 as its 2 bits in). This gives us a size we then shift
362ec1df41SThomas Gleixner 	 *  to turn into 128K blocks
372ec1df41SThomas Gleixner 	 *
382ec1df41SThomas Gleixner 	 *  eg              111 1111 1111 1100      is 512K
392ec1df41SThomas Gleixner 	 *
402ec1df41SThomas Gleixner 	 *  invert          000 0000 0000 0011
412ec1df41SThomas Gleixner 	 *  +1              000 0000 0000 0100
422ec1df41SThomas Gleixner 	 *  *128K   ...
432ec1df41SThomas Gleixner 	 */
442ec1df41SThomas Gleixner 	low = (~low) & 0x1FFFC;
452ec1df41SThomas Gleixner 	*size = (low + 4) << (15 - PAGE_SHIFT);
462ec1df41SThomas Gleixner 	return;
472ec1df41SThomas Gleixner }
482ec1df41SThomas Gleixner 
492ec1df41SThomas Gleixner static void amd_set_mtrr(unsigned int reg, unsigned long base,
502ec1df41SThomas Gleixner 			 unsigned long size, mtrr_type type)
512ec1df41SThomas Gleixner /*  [SUMMARY] Set variable MTRR register on the local CPU.
522ec1df41SThomas Gleixner     <reg> The register to set.
532ec1df41SThomas Gleixner     <base> The base address of the region.
542ec1df41SThomas Gleixner     <size> The size of the region. If this is 0 the region is disabled.
552ec1df41SThomas Gleixner     <type> The type of the region.
562ec1df41SThomas Gleixner     <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
572ec1df41SThomas Gleixner     be done externally.
582ec1df41SThomas Gleixner     [RETURNS] Nothing.
592ec1df41SThomas Gleixner */
602ec1df41SThomas Gleixner {
612ec1df41SThomas Gleixner 	u32 regs[2];
622ec1df41SThomas Gleixner 
632ec1df41SThomas Gleixner 	/*
642ec1df41SThomas Gleixner 	 *  Low is MTRR0 , High MTRR 1
652ec1df41SThomas Gleixner 	 */
662ec1df41SThomas Gleixner 	rdmsr(MSR_K6_UWCCR, regs[0], regs[1]);
672ec1df41SThomas Gleixner 	/*
682ec1df41SThomas Gleixner 	 *  Blank to disable
692ec1df41SThomas Gleixner 	 */
702ec1df41SThomas Gleixner 	if (size == 0)
712ec1df41SThomas Gleixner 		regs[reg] = 0;
722ec1df41SThomas Gleixner 	else
732ec1df41SThomas Gleixner 		/* Set the register to the base, the type (off by one) and an
742ec1df41SThomas Gleixner 		   inverted bitmask of the size The size is the only odd
752ec1df41SThomas Gleixner 		   bit. We are fed say 512K We invert this and we get 111 1111
762ec1df41SThomas Gleixner 		   1111 1011 but if you subtract one and invert you get the
772ec1df41SThomas Gleixner 		   desired 111 1111 1111 1100 mask
782ec1df41SThomas Gleixner 
792ec1df41SThomas Gleixner 		   But ~(x - 1) == ~x + 1 == -x. Two's complement rocks!  */
802ec1df41SThomas Gleixner 		regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC)
812ec1df41SThomas Gleixner 		    | (base << PAGE_SHIFT) | (type + 1);
822ec1df41SThomas Gleixner 
832ec1df41SThomas Gleixner 	/*
842ec1df41SThomas Gleixner 	 *  The writeback rule is quite specific. See the manual. Its
852ec1df41SThomas Gleixner 	 *  disable local interrupts, write back the cache, set the mtrr
862ec1df41SThomas Gleixner 	 */
872ec1df41SThomas Gleixner 	wbinvd();
882ec1df41SThomas Gleixner 	wrmsr(MSR_K6_UWCCR, regs[0], regs[1]);
892ec1df41SThomas Gleixner }
902ec1df41SThomas Gleixner 
912ec1df41SThomas Gleixner static int amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
922ec1df41SThomas Gleixner {
932ec1df41SThomas Gleixner 	/* Apply the K6 block alignment and size rules
942ec1df41SThomas Gleixner 	   In order
952ec1df41SThomas Gleixner 	   o Uncached or gathering only
962ec1df41SThomas Gleixner 	   o 128K or bigger block
972ec1df41SThomas Gleixner 	   o Power of 2 block
982ec1df41SThomas Gleixner 	   o base suitably aligned to the power
992ec1df41SThomas Gleixner 	*/
1002ec1df41SThomas Gleixner 	if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT))
1012ec1df41SThomas Gleixner 	    || (size & ~(size - 1)) - size || (base & (size - 1)))
1022ec1df41SThomas Gleixner 		return -EINVAL;
1032ec1df41SThomas Gleixner 	return 0;
1042ec1df41SThomas Gleixner }
1052ec1df41SThomas Gleixner 
/* MTRR operations vector for the AMD K6 family's UWCCR-based MTRRs. */
static struct mtrr_ops amd_mtrr_ops = {
	.vendor            = X86_VENDOR_AMD,
	.set               = amd_set_mtrr,
	.get               = amd_get_mtrr,
	/* Free-slot search is shared with the generic MTRR code. */
	.get_free_region   = generic_get_free_region,
	.validate_add_page = amd_validate_add_page,
	/* presumably reports unconditional WC support — see mtrr.h */
	.have_wrcomb       = positive_have_wrcomb,
};
1142ec1df41SThomas Gleixner 
/*
 * Register the AMD K6 MTRR implementation with the common MTRR driver.
 * Always succeeds and returns 0.
 */
int __init amd_init_mtrr(void)
{
	set_mtrr_ops(&amd_mtrr_ops);
	return 0;
}
1202ec1df41SThomas Gleixner 
/*
 * Dead initcall reference kept for history; note the name mismatch
 * (amd_mtrr_init vs. the actual amd_init_mtrr above).  Registration
 * presumably happens via an explicit amd_init_mtrr() call from the
 * common MTRR init code instead — verify against mtrr/main.c.
 */
//arch_initcall(amd_mtrr_init);
122