#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/dmar.h>

#include <asm/smp.h>
#include <asm/apic.h>
#include <asm/ipi.h>

/* Per-CPU cache of each CPU's logical APIC ID, filled in by init_x2apic_ldr(). */
DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);

/* This driver is usable whenever the CPU is running in x2apic mode. */
static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled();
}

/* Start with all IRQs pointing to the boot CPU.  IRQ balancing will shift them. */
static const struct cpumask *x2apic_target_cpus(void)
{
	return cpumask_of(0);
}

/*
 * For now each logical CPU is in its own vector allocation domain.
 */
static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}

static void
__x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
{
	unsigned long cfg;

	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * Send the IPI.
	 */
	native_x2apic_icr_write(cfg, apicid);
}

/*
 * For now, we send the IPIs one by one for each CPU in the cpumask.
 * TBD: Based on the cpumask, the IPIs could be sent to a whole cluster
 * group at once.  With 16 CPUs per cluster, batching would minimize ICR
 * register writes (see the illustrative sketch at the end of this file).
 */
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	x2apic_wrmsr_fence();

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__x2apic_send_IPI_dest(
			per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned long this_cpu = smp_processor_id();
	unsigned long query_cpu;
	unsigned long flags;

	x2apic_wrmsr_fence();

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__x2apic_send_IPI_dest(
			per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

static void x2apic_send_IPI_allbutself(int vector)
{
	unsigned long this_cpu = smp_processor_id();
	unsigned long query_cpu;
	unsigned long flags;

	x2apic_wrmsr_fence();

	local_irq_save(flags);
	for_each_online_cpu(query_cpu) {
		if (query_cpu == this_cpu)
			continue;
		__x2apic_send_IPI_dest(
			per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

static void x2apic_send_IPI_all(int vector)
{
	x2apic_send_IPI_mask(cpu_online_mask, vector);
}

static int x2apic_apic_id_registered(void)
{
	return 1;
}

static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	/*
	 * We're using fixed IRQ delivery, so we can only return one logical
	 * APIC ID.  May as well be the first.
	 */
	int cpu = cpumask_first(cpumask);

	if ((unsigned)cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_logical_apicid, cpu);
	else
		return BAD_APICID;
}

static unsigned int
x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			      const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, so we can only return one logical
	 * APIC ID.  May as well be the first online CPU in the intersection.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	}

	if (cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_logical_apicid, cpu);

	return BAD_APICID;
}

/* x2apic IDs are full 32-bit values, so no masking or shifting is needed. */
static unsigned int x2apic_cluster_phys_get_apic_id(unsigned long x)
{
	unsigned int id;

	id = x;
	return id;
}

static unsigned long set_apic_id(unsigned int id)
{
	unsigned long x;

	x = id;
	return x;
}

static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb)
{
	return current_cpu_data.initial_apicid >> index_msb;
}

/* In x2apic mode a self-IPI goes through the dedicated SELF_IPI register. */
static void x2apic_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}

/* Cache this CPU's logical APIC ID (from the LDR) for use as an IPI destination. */
static void init_x2apic_ldr(void)
{
	int cpu = smp_processor_id();

	per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR);
}

struct apic apic_x2apic_cluster = {

	.name				= "cluster x2apic",
	.probe				= NULL,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,
	.apic_id_registered		= x2apic_apic_id_registered,

	.irq_delivery_mode		= dest_LowestPrio,
	.irq_dest_mode			= 1, /* logical */

	.target_cpus			= x2apic_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,

	.vector_allocation_domain	= x2apic_vector_allocation_domain,
	.init_apic_ldr			= init_x2apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.apicid_to_node			= NULL,
	.cpu_to_logical_apicid		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= x2apic_cluster_phys_pkg_id,
	.mps_oem_check			= NULL,

	.get_apic_id			= x2apic_cluster_phys_get_apic_id,
	.set_apic_id			= set_apic_id,
	.apic_id_mask			= 0xFFFFFFFFu,

	.cpu_mask_to_apicid		= x2apic_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= x2apic_cpu_mask_to_apicid_and,

	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,

	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= NULL,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};
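
/*
 * Illustrative sketch only -- not wired into apic_x2apic_cluster, and the
 * function name is hypothetical.  This shows one possible shape for the
 * batching TBD above x2apic_send_IPI_mask(): in x2apic cluster mode the
 * logical ID read from APIC_LDR encodes the cluster number in bits 31:16
 * and a 16-bit member bitmask in bits 15:0, so the member bits of every
 * destination CPU in one cluster can be OR'ed together and delivered with
 * a single ICR write instead of one write per CPU.
 */
static void __maybe_unused
x2apic_send_IPI_mask_cluster_batched(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu, scan_cpu;
	unsigned long flags;

	x2apic_wrmsr_fence();

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		u32 dest = per_cpu(x86_cpu_to_logical_apicid, query_cpu);
		u32 cluster = dest >> 16;

		/*
		 * Let only the first mask CPU of each cluster issue the
		 * write; the remaining members are folded in below.
		 */
		for_each_cpu(scan_cpu, mask)
			if ((per_cpu(x86_cpu_to_logical_apicid, scan_cpu) >> 16) == cluster)
				break;
		if (scan_cpu != query_cpu)
			continue;

		/* OR in the member bits of every mask CPU in this cluster. */
		for_each_cpu(scan_cpu, mask) {
			u32 ldr = per_cpu(x86_cpu_to_logical_apicid, scan_cpu);

			if ((ldr >> 16) == cluster)
				dest |= ldr & 0xFFFFu;
		}
		__x2apic_send_IPI_dest(dest, vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}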