// SPDX-License-Identifier: GPL-2.0
/*
 * MTRR support for the AMD K6 family: these CPUs expose two
 * write-combining/uncacheable regions through the MSR_K6_UWCCR MSR
 * (region 0 in the low dword, region 1 in the high dword).
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/mtrr.h>
#include <asm/msr.h>

#include "mtrr.h"

/*
 * amd_get_mtrr - read back one region from MSR_K6_UWCCR
 * @reg:  region to read (0 = low dword, 1 = high dword)
 * @base: returned base address, in pages
 * @size: returned region size, in pages (0 when the region is disabled)
 * @type: returned memory type (MTRR_TYPE_UNCACHABLE or MTRR_TYPE_WRCOMB)
 */
static void
amd_get_mtrr(unsigned int reg, unsigned long *base,
	     unsigned long *size, mtrr_type *type)
{
	unsigned long low, high;

	rdmsr(MSR_K6_UWCCR, low, high);
	/* Upper dword is region 1, lower is region 0 */
	if (reg == 1)
		low = high;
	/* The base masks off on the right alignment */
	*base = (low & 0xFFFE0000) >> PAGE_SHIFT;
	*type = 0;
	/* Bit 0 = uncacheable, bit 1 = write-combining */
	if (low & 1)
		*type = MTRR_TYPE_UNCACHABLE;
	if (low & 2)
		*type = MTRR_TYPE_WRCOMB;
	/* Neither enable bit set: region is disabled, report size 0 */
	if (!(low & 3)) {
		*size = 0;
		return;
	}
	/*
	 * This needs a little explaining. The size is stored as an
	 * inverted mask of bits of 128K granularity 15 bits long offset
	 * 2 bits.
	 *
	 * So to get a size we do invert the mask and add 1 to the lowest
	 * mask bit (4 as its 2 bits in). This gives us a size we then shift
	 * to turn into 128K blocks.
	 *
	 * eg              111 1111 1111 1100      is 512K
	 *
	 * invert          000 0000 0000 0011
	 * +1              000 0000 0000 0100
	 * *128K   ...
	 */
	low = (~low) & 0x1FFFC;
	/* (15 - PAGE_SHIFT) converts 128K blocks to pages */
	*size = (low + 4) << (15 - PAGE_SHIFT);
}

/**
 * amd_set_mtrr - Set variable MTRR register on the local CPU.
 *
 * @reg:  The register to set (0 or 1, selecting the low/high dword
 *        of MSR_K6_UWCCR).
 * @base: The base address of the region, in pages.
 * @size: The size of the region, in pages. If this is 0 the region is
 *        disabled.
 * @type: The type of the region (MTRR_TYPE_UNCACHABLE or
 *        MTRR_TYPE_WRCOMB).
 *
 * Returns nothing.
 */
static void
amd_set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
	u32 regs[2];

	/*
	 * Low is MTRR0, High MTRR 1: read both so the untouched
	 * region is written back unchanged.
	 */
	rdmsr(MSR_K6_UWCCR, regs[0], regs[1]);
	/*
	 * Blank to disable
	 */
	if (size == 0) {
		regs[reg] = 0;
	} else {
		/*
		 * Set the register to the base, the type (off by one) and an
		 * inverted bitmask of the size The size is the only odd
		 * bit. We are fed say 512K We invert this and we get 111 1111
		 * 1111 1011 but if you subtract one and invert you get the
		 * desired 111 1111 1111 1100 mask
		 *
		 * But ~(x - 1) == ~x + 1 == -x. Two's complement rocks!
		 */
		regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC)
		    | (base << PAGE_SHIFT) | (type + 1);
	}

	/*
	 * The update sequence is quite specific; per the manual it is:
	 * disable local interrupts, write back the cache, then set the
	 * MTRR. (Interrupt disabling is presumably handled by the
	 * generic set_mtrr machinery before we are called — NOTE(review):
	 * confirm against the callers; only wbinvd + wrmsr happen here.)
	 */
	wbinvd();
	wrmsr(MSR_K6_UWCCR, regs[0], regs[1]);
}

/*
 * amd_validate_add_page - check that a proposed region is representable
 * @base: proposed base address, in pages
 * @size: proposed region size, in pages
 * @type: proposed memory type
 *
 * Returns 0 if the K6 hardware can encode the region, -EINVAL otherwise.
 */
static int
amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	/*
	 * Apply the K6 block alignment and size rules
	 * In order
	 * o Uncached or gathering only
	 * o 128K or bigger block
	 * o Power of 2 block
	 * o base suitably aligned to the power
	 *
	 * (size & ~(size - 1)) isolates the lowest set bit of size; it
	 * equals size only when size is a power of two, so the
	 * subtraction is nonzero exactly for non-power-of-two sizes.
	 */
	if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT))
	    || (size & ~(size - 1)) - size || (base & (size - 1)))
		return -EINVAL;
	return 0;
}

/* Vendor-specific MTRR operations hooked in for AMD CPUs */
const struct mtrr_ops amd_mtrr_ops = {
	.vendor            = X86_VENDOR_AMD,
	.set               = amd_set_mtrr,
	.get               = amd_get_mtrr,
	.get_free_region   = generic_get_free_region,
	.validate_add_page = amd_validate_add_page,
	.have_wrcomb       = positive_have_wrcomb,
};