xref: /openbmc/linux/arch/x86/kernel/cpu/mtrr/amd.c (revision 2ec1df41)
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/mtrr.h>
#include <asm/msr.h>

#include "mtrr.h"

static void
amd_get_mtrr(unsigned int reg, unsigned long *base,
	     unsigned long *size, mtrr_type *type)
{
	unsigned long low, high;

	rdmsr(MSR_K6_UWCCR, low, high);
	/*  Upper dword is region 1, lower is region 0  */
	if (reg == 1)
		low = high;
	/*  The base is 128K aligned; mask it off and convert to pages  */
	*base = (low & 0xFFFE0000) >> PAGE_SHIFT;
	*type = 0;
	if (low & 1)
		*type = MTRR_TYPE_UNCACHABLE;
	if (low & 2)
		*type = MTRR_TYPE_WRCOMB;
	if (!(low & 3)) {
		*size = 0;
		return;
	}
	/*
	 *  This needs a little explaining. The size is stored as an
	 *  inverted mask of bits of 128K granularity, 15 bits long,
	 *  offset 2 bits into the register.
	 *
	 *  So to get a size we invert the mask and add 1 to the lowest
	 *  mask bit (4, as it is 2 bits in). This gives us a size we
	 *  then shift to turn into 128K blocks.
	 *
	 *  eg              111 1111 1111 1100      is 512K
	 *
	 *  invert          000 0000 0000 0011
	 *  +1              000 0000 0000 0100
	 *  *128K   ...
	 */
	low = (~low) & 0x1FFFC;
	*size = (low + 4) << (15 - PAGE_SHIFT);
	return;
}
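
/*
 * Illustrative sketch only, not part of the original file: a hypothetical
 * helper that repeats the size decode above but returns bytes, so the 512K
 * example in the comment can be checked directly.  The result is 64-bit
 * because the largest encodable region is a full 4G, which does not fit in
 * a 32-bit byte count.
 */
static inline u64 uwccr_region_bytes(u32 low)
{
	/* the 15-bit inverted mask lives in bits 16:2, 128K granularity */
	u64 mask = (~low) & 0x1FFFC;

	/*
	 * 512K example: register mask field is 111 1111 1111 1100
	 *   invert and mask   -> 0x0000C
	 *   add 1 at bit 2    -> 0x00010   (4 blocks of 128K)
	 *   shift left by 15  -> 0x80000   (512K)
	 */
	return (mask + 4) << 15;
}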

static void amd_set_mtrr(unsigned int reg, unsigned long base,
			 unsigned long size, mtrr_type type)
/*  [SUMMARY] Set variable MTRR register on the local CPU.
    <reg> The register to set.
    <base> The base address of the region.
    <size> The size of the region. If this is 0 the region is disabled.
    <type> The type of the region.
    [RETURNS] Nothing.
*/
{
	u32 regs[2];

	/*
	 *  Low dword is MTRR0, high dword is MTRR1
	 */
	rdmsr(MSR_K6_UWCCR, regs[0], regs[1]);
	/*
	 *  Blank to disable
	 */
	if (size == 0)
		regs[reg] = 0;
	else
		/* Set the register to the base, the type (off by one) and an
		   inverted bitmask of the size. The size is the only odd
		   part. Say we are fed 512K: inverting that gives 111 1111
		   1111 1011, but subtracting one first and then inverting
		   gives the desired 111 1111 1111 1100 mask.

		   But ~(x - 1) == ~x + 1 == -x. Two's complement rocks!  */
		regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC)
		    | (base << PAGE_SHIFT) | (type + 1);

	/*
	 *  The writeback rule is quite specific. See the manual. It is:
	 *  disable local interrupts, write back the cache, set the MTRR.
	 */
	wbinvd();
	wrmsr(MSR_K6_UWCCR, regs[0], regs[1]);
}
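
/*
 * Illustrative sketch only, not part of the original file: a hypothetical
 * helper isolating the two's complement trick used above.  With size given
 * in pages and PAGE_SHIFT == 12, -size shifted right by 3 and masked yields
 * the inverted size mask the hardware expects, matching the 512K example in
 * the comment above.
 */
static inline u32 uwccr_size_mask(unsigned long size)
{
	/*
	 * 512K example: size = 128 pages
	 *   -size >> 3     ends in ...1 1111 1111 1111 0000
	 *   & 0x0001FFFC   = 0x0001FFF0, i.e. mask field 111 1111 1111 1100
	 */
	return (-size >> (15 - PAGE_SHIFT)) & 0x0001FFFC;
}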

static int amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	/* Apply the K6 block alignment and size rules
	   In order
	   o Uncached or write-gathering only
	   o 128K or bigger block
	   o Power of 2 block
	   o base suitably aligned to the power
	*/
	if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT))
	    || (size & ~(size - 1)) - size || (base & (size - 1)))
		return -EINVAL;
	return 0;
}
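
/*
 * Illustrative sketch only, not part of the original file: the power-of-two
 * test above relies on size & ~(size - 1) isolating the lowest set bit,
 * which equals size exactly when a single bit is set.  The same rules,
 * restated with one named condition each (hypothetical helper):
 */
static inline int k6_region_ok(unsigned long base, unsigned long size,
			       unsigned int type)
{
	int bad_type   = type > MTRR_TYPE_WRCOMB;          /* only UC and WC are supported */
	int too_small  = size < (1 << (17 - PAGE_SHIFT));  /* smaller than 128K */
	int not_pow2   = (size & ~(size - 1)) != size;     /* not a power of two */
	int misaligned = (base & (size - 1)) != 0;         /* base not aligned to size */

	return !(bad_type || too_small || not_pow2 || misaligned);
}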

static struct mtrr_ops amd_mtrr_ops = {
	.vendor            = X86_VENDOR_AMD,
	.set               = amd_set_mtrr,
	.get               = amd_get_mtrr,
	.get_free_region   = generic_get_free_region,
	.validate_add_page = amd_validate_add_page,
	.have_wrcomb       = positive_have_wrcomb,
};

int __init amd_init_mtrr(void)
{
	set_mtrr_ops(&amd_mtrr_ops);
	return 0;
}

//arch_initcall(amd_mtrr_init);