/*
 * OR1K timer synchronisation
 *
 * Based on work from MIPS implementation.
 *
 * All CPUs will have their count registers synchronised to the CPU0 next time
 * value. This can cause a small timewarp for CPU0. All other CPUs should
 * not have done anything significant (but they may have had interrupts
 * enabled briefly - prom_smp_finish() should not be responsible for enabling
 * interrupts...)
 */

#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <linux/cpumask.h>

#include <asm/time.h>
#include <asm/timex.h>
#include <linux/atomic.h>
#include <asm/barrier.h>

#include <asm/spr.h>

static unsigned int initcount;
static atomic_t count_count_start = ATOMIC_INIT(0);
static atomic_t count_count_stop = ATOMIC_INIT(0);

#define COUNTON 100
#define NR_LOOPS 3

void synchronise_count_master(int cpu)
{
	int i;
	unsigned long flags;

	pr_info("Synchronize counters for CPU %u: ", cpu);

	local_irq_save(flags);

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronised and
	 * the master and slaves each set their cycle counters to a known
	 * value all at once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * two CPUs.
	 */

	for (i = 0; i < NR_LOOPS; i++) {
		/* slaves loop on '!= 2' */
		while (atomic_read(&count_count_start) != 1)
			mb();
		atomic_set(&count_count_stop, 0);
		smp_wmb();

		/* Let the slave write its count register */
		atomic_inc(&count_count_start);

		/* Count will be initialised to current timer */
		if (i == 1)
			initcount = get_cycles();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			openrisc_timer_set(initcount);

		/*
		 * Wait for slave to leave the synchronization point:
		 */
		while (atomic_read(&count_count_stop) != 1)
			mb();
		atomic_set(&count_count_start, 0);
		smp_wmb();
		atomic_inc(&count_count_stop);
	}
	/* Arrange for an interrupt in a short while */
	openrisc_timer_set_next(COUNTON);

	local_irq_restore(flags);

	/*
	 * i386 code reported the skew here, but the
	 * count registers were almost certainly out of sync
	 * so no point in alarming people
	 */
	pr_cont("done.\n");
}

void synchronise_count_slave(int cpu)
{
	int i;

	/*
	 * Not every cpu is online at the time this gets called,
	 * so we first wait for the master to say everyone is ready
	 */

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&count_count_start);
		while (atomic_read(&count_count_start) != 2)
			mb();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			openrisc_timer_set(initcount);

		atomic_inc(&count_count_stop);
		while (atomic_read(&count_count_stop) != 2)
			mb();
	}
	/* Arrange for an interrupt in a short while */
	openrisc_timer_set_next(COUNTON);
}
#undef NR_LOOPS