// SPDX-License-Identifier: GPL-2.0-only

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <asm/cpufeature.h>
#include <asm/mtrr.h>
#include <asm/processor.h>
#include "mtrr.h"

/* Select the vendor-specific (legacy) MTRR driver for pre-generic CPUs. */
void mtrr_set_if(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		/* Pre-Athlon (K6) AMD CPU MTRRs */
		if (cpu_feature_enabled(X86_FEATURE_K6_MTRR))
			mtrr_if = &amd_mtrr_ops;
		break;
	case X86_VENDOR_CENTAUR:
		if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR))
			mtrr_if = &centaur_mtrr_ops;
		break;
	case X86_VENDOR_CYRIX:
		if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR))
			mtrr_if = &cyrix_mtrr_ops;
		break;
	default:
		break;
	}
}

/*
 * The suspend/resume methods are only for CPUs without MTRR. CPUs using the
 * generic MTRR driver don't require this.
 */
struct mtrr_value {
	mtrr_type	ltype;
	unsigned long	lbase;
	unsigned long	lsize;
};

static struct mtrr_value *mtrr_value;

/* Save the state of all variable MTRR ranges before suspend. */
static int mtrr_save(void)
{
	int i;

	if (!mtrr_value)
		return -ENOMEM;

	for (i = 0; i < num_var_ranges; i++) {
		mtrr_if->get(i, &mtrr_value[i].lbase,
			     &mtrr_value[i].lsize,
			     &mtrr_value[i].ltype);
	}
	return 0;
}

/* Restore the saved variable MTRR ranges on resume. */
static void mtrr_restore(void)
{
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (mtrr_value[i].lsize) {
			mtrr_if->set(i, mtrr_value[i].lbase,
				     mtrr_value[i].lsize,
				     mtrr_value[i].ltype);
		}
	}
}

static struct syscore_ops mtrr_syscore_ops = {
	.suspend	= mtrr_save,
	.resume		= mtrr_restore,
};

void mtrr_register_syscore(void)
{
	mtrr_value = kcalloc(num_var_ranges, sizeof(*mtrr_value), GFP_KERNEL);

	/*
	 * These CPUs have no generic MTRR support and apparently no SMP
	 * support either. They have vendor-specific drivers, so a tricky
	 * method is used to support suspend/resume for them.
	 *
	 * TBD: is there any system with such a CPU that supports
	 * suspend/resume? If not, this code should be removed.
	 */
	register_syscore_ops(&mtrr_syscore_ops);
}