/*
 * sun4m SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/cpu.h>

#include <asm/cacheflush.h>
#include <asm/switch_to.h>
#include <asm/tlbflush.h>

#include "irq.h"
#include "kernel.h"

#define IRQ_IPI_SINGLE		12
#define IRQ_IPI_MASK		13
#define IRQ_IPI_RESCHED		14
#define IRQ_CROSS_CALL		15

/*
 * The sparc "swap" instruction atomically exchanges a register with a
 * word of memory, so the callin flag below is published in a single
 * atomic store.
 */
static inline unsigned long
swap_ulong(volatile unsigned long *ptr, unsigned long val)
{
	__asm__ __volatile__("swap [%1], %0\n\t" :
			     "=&r" (val), "=&r" (ptr) :
			     "0" (val), "1" (ptr));
	return val;
}

static void smp4m_ipi_init(void);
static void smp_setup_percpu_timer(void);

void __cpuinit smp4m_callin(void)
{
	int cpuid = hard_smp_processor_id();

	local_flush_cache_all();
	local_flush_tlb_all();

	notify_cpu_starting(cpuid);

	/* Get our local ticker going. */
	smp_setup_percpu_timer();

	calibrate_delay();
	smp_store_cpu_info(cpuid);

	local_flush_cache_all();
	local_flush_tlb_all();

	/*
	 * Unblock the master CPU _only_ once the scheduler state of
	 * this secondary is up-to-date, so that after SMP
	 * initialization the master can safely call into the
	 * scheduler code.
	 */
	/* Allow master to continue. */
	swap_ulong(&cpu_callin_map[cpuid], 1);

	/* XXX: What's up with all the flushes? */
	local_flush_cache_all();
	local_flush_tlb_all();

	/* Fix idle thread fields: point %g6 at our thread_info. */
	__asm__ __volatile__("ld [%0], %%g6\n\t"
			     : : "r" (&current_set[cpuid])
			     : "memory" /* paranoid */);

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		mb();

	local_irq_enable();

	set_cpu_online(cpuid, true);
}

/*
 * Cycle through the processors asking the PROM to start each one.
 */
void __init smp4m_boot_cpus(void)
{
	smp4m_ipi_init();
	smp_setup_percpu_timer();
	local_flush_cache_all();
}

int __cpuinit smp4m_boot_one_cpu(int i)
{
	unsigned long *entry = &sun4m_cpu_startup;
	struct task_struct *p;
	int timeout;
	int cpu_node;

	cpu_find_by_mid(i, &cpu_node);

	/* Cook up an idler for this guy. */
	p = fork_idle(i);
	current_set[i] = task_thread_info(p);
	/* See trampoline.S for details... */
	entry += ((i - 1) * 3);

	/*
	 * Initialize the contexts table.
	 * Since the call to prom_startcpu() trashes the structure,
	 * we need to re-initialize it for each cpu.
	 */
	smp_penguin_ctable.which_io = 0;
	smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
	smp_penguin_ctable.reg_size = 0;

	/* whirrr, whirrr, whirrrrrrrrr... */
	printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
	local_flush_cache_all();
	prom_startcpu(cpu_node, &smp_penguin_ctable, 0, (char *)entry);

	/* wheee... it's going... */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_callin_map[i])
			break;
		udelay(200);
	}

	if (!(cpu_callin_map[i])) {
		printk(KERN_ERR "Processor %d is stuck.\n", i);
		return -ENODEV;
	}

	local_flush_cache_all();
	return 0;
}
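/*
 * For orientation, the bring-up handshake implemented above (a sketch;
 * that the generic sparc32 SMP code outside this file is what sets
 * smp_commenced_mask is an assumption based on the spin loop in
 * smp4m_callin()):
 *
 *	master (smp4m_boot_one_cpu)	secondary (smp4m_callin)
 *	---------------------------	------------------------
 *	prom_startcpu()		  --->	enters via sun4m_cpu_startup
 *	polls cpu_callin_map[i]	  <---	swap_ulong(&cpu_callin_map[i], 1)
 *	generic code sets
 *	smp_commenced_mask	  --->	spins on smp_commenced_mask, then
 *					enables IRQs and goes online
 */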
void __init smp4m_smp_done(void)
{
	int i, first;
	int *prev;

	/* setup cpu list for irq rotation */
	first = 0;
	prev = &first;
	for_each_online_cpu(i) {
		*prev = i;
		prev = &cpu_data(i).next;
	}
	*prev = first;
	local_flush_cache_all();

	/* Ok, they are spinning and ready to go. */
}

/* Initialize IPIs on the SUN4M SMP machine */
static void __init smp4m_ipi_init(void)
{
	/*
	 * Nothing to set up: sun4m delivers IPIs via the fixed soft
	 * interrupt levels defined at the top of this file.
	 */
}

static void smp4m_ipi_resched(int cpu)
{
	set_cpu_int(cpu, IRQ_IPI_RESCHED);
}

static void smp4m_ipi_single(int cpu)
{
	set_cpu_int(cpu, IRQ_IPI_SINGLE);
}

static void smp4m_ipi_mask_one(int cpu)
{
	set_cpu_int(cpu, IRQ_IPI_MASK);
}

static struct smp_funcall {
	smpfunc_t func;
	unsigned long arg1;
	unsigned long arg2;
	unsigned long arg3;
	unsigned long arg4;
	unsigned long arg5;
	unsigned long processors_in[SUN4M_NCPUS];  /* Set when ipi entered. */
	unsigned long processors_out[SUN4M_NCPUS]; /* Set when ipi exited. */
} ccall_info;

static DEFINE_SPINLOCK(cross_call_lock);

/* Cross calls must be serialized, at least currently. */
static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
			     unsigned long arg2, unsigned long arg3,
			     unsigned long arg4)
{
	register int ncpus = SUN4M_NCPUS;
	unsigned long flags;

	spin_lock_irqsave(&cross_call_lock, flags);

	/* Init function glue. */
	ccall_info.func = func;
	ccall_info.arg1 = arg1;
	ccall_info.arg2 = arg2;
	ccall_info.arg3 = arg3;
	ccall_info.arg4 = arg4;
	ccall_info.arg5 = 0;

	/* Init receive/complete mapping, plus fire the IPI's off. */
	{
		register int i;

		cpumask_clear_cpu(smp_processor_id(), &mask);
		cpumask_and(&mask, cpu_online_mask, &mask);
		for (i = 0; i < ncpus; i++) {
			if (cpumask_test_cpu(i, &mask)) {
				ccall_info.processors_in[i] = 0;
				ccall_info.processors_out[i] = 0;
				set_cpu_int(i, IRQ_CROSS_CALL);
			} else {
				ccall_info.processors_in[i] = 1;
				ccall_info.processors_out[i] = 1;
			}
		}
	}

	{
		register int i;

		/* First wait for every target to enter the handler... */
		i = 0;
		do {
			if (!cpumask_test_cpu(i, &mask))
				continue;
			while (!ccall_info.processors_in[i])
				barrier();
		} while (++i < ncpus);

		/* ...then for every target to finish running func. */
		i = 0;
		do {
			if (!cpumask_test_cpu(i, &mask))
				continue;
			while (!ccall_info.processors_out[i])
				barrier();
		} while (++i < ncpus);
	}
	spin_unlock_irqrestore(&cross_call_lock, flags);
}
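/*
 * Usage sketch (hypothetical caller; assumes the xc0()..xc4() wrappers
 * from the sparc32 <asm/smp.h> of this era, which expand to
 * smp_cross_call(func, *cpu_online_mask, ...)):
 *
 *	void smp_flush_cache_all(void)
 *	{
 *		xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
 *		local_flush_cache_all();
 *	}
 *
 * Every other online CPU runs the function from smp4m_cross_call_irq()
 * below while the caller performs its own local flush directly.
 */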
/* Running cross calls. */
void smp4m_cross_call_irq(void)
{
	int i = smp_processor_id();

	ccall_info.processors_in[i] = 1;
	ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
			ccall_info.arg4, ccall_info.arg5);
	ccall_info.processors_out[i] = 1;
}

void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	int cpu = smp_processor_id();

	old_regs = set_irq_regs(regs);

	sun4m_clear_profile_irq(cpu);

	profile_tick(CPU_PROFILING);

	if (!--prof_counter(cpu)) {
		int user = user_mode(regs);

		irq_enter();
		update_process_times(user);
		irq_exit();

		prof_counter(cpu) = prof_multiplier(cpu);
	}
	set_irq_regs(old_regs);
}

static void __cpuinit smp_setup_percpu_timer(void)
{
	int cpu = smp_processor_id();

	prof_counter(cpu) = prof_multiplier(cpu) = 1;
	load_profile_irq(cpu, lvl14_resolution);

	if (cpu == boot_cpu_id)
		sun4m_unmask_profile_irq();
}

/*
 * Patch in a three-instruction sequence that computes the CPU number
 * from %tbr: the per-CPU trap tables are 4 KB-aligned and laid out so
 * that (%tbr >> 12) & 3 recovers the processor id.  The template's rd
 * field (bits 29:25) names the scratch register; rd >> 11 replicates
 * it into the rs1 field (bits 18:14) so the generated sequence reads
 * and rewrites the same register.
 */
static void __init smp4m_blackbox_id(unsigned *addr)
{
	int rd = *addr & 0x3e000000;
	int rs1 = rd >> 11;

	addr[0] = 0x81580000 | rd;		/* rd %tbr, reg */
	addr[1] = 0x8130200c | rd | rs1;	/* srl reg, 0xc, reg */
	addr[2] = 0x80082003 | rd | rs1;	/* and reg, 3, reg */
}

/*
 * Same decode, but left-shifted by two: (%tbr >> 10) & 0xc is the CPU
 * number times sizeof(void *), ready to index the current_set[] table.
 * Slots addr[1] and addr[3] belong to the call-site template and are
 * left untouched.
 */
static void __init smp4m_blackbox_current(unsigned *addr)
{
	int rd = *addr & 0x3e000000;
	int rs1 = rd >> 11;

	addr[0] = 0x81580000 | rd;		/* rd %tbr, reg */
	addr[2] = 0x8130200a | rd | rs1;	/* srl reg, 0xa, reg */
	addr[4] = 0x8008200c | rd | rs1;	/* and reg, 0xc, reg */
}

void __init sun4m_init_smp(void)
{
	BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4m_blackbox_id);
	BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current);
	BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4m_processor_id,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(smp_ipi_resched, smp4m_ipi_resched, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(smp_ipi_single, smp4m_ipi_single, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(smp_ipi_mask_one, smp4m_ipi_mask_one, BTFIXUPCALL_NORM);
}
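/*
 * Note on the BTFIXUP setup above (a sketch of the mechanism, not a
 * spec): sparc32 avoids indirect function pointers on these hot paths
 * by patching every call site at boot.  BTFIXUPCALL_NORM turns a site
 * into a direct call to the sun4m implementation, while
 * BTFIXUPSET_BLACKBOX hands the site to the blackbox helpers above so
 * they can emit their short %tbr-decoding instruction sequences inline.
 */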