xref: /openbmc/linux/arch/mips/kernel/sync-r4k.c (revision 8dd06ef34b6e2f41b29fbf5fc1663780f2524285)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
239b8d525SRalf Baechle /*
339b8d525SRalf Baechle  * Count register synchronisation.
439b8d525SRalf Baechle  *
5eb9b5141STim Anderson  * All CPUs will have their count registers synchronised to the CPU0 next time
 639b8d525SRalf Baechle  * value. This can cause a small timewarp for CPU0. All other CPUs should
739b8d525SRalf Baechle  * not have done anything significant (but they may have had interrupts
839b8d525SRalf Baechle  * enabled briefly - prom_smp_finish() should not be responsible for enabling
939b8d525SRalf Baechle  * interrupts...)
1039b8d525SRalf Baechle  */
1139b8d525SRalf Baechle 
1239b8d525SRalf Baechle #include <linux/kernel.h>
1339b8d525SRalf Baechle #include <linux/irqflags.h>
14eb9b5141STim Anderson #include <linux/cpumask.h>
1539b8d525SRalf Baechle 
16eb9b5141STim Anderson #include <asm/r4k-timer.h>
1760063497SArun Sharma #include <linux/atomic.h>
1839b8d525SRalf Baechle #include <asm/barrier.h>
1939b8d525SRalf Baechle #include <asm/mipsregs.h>
2039b8d525SRalf Baechle 
21db0dbd57SHuacai Chen static unsigned int initcount = 0;
22078a55fcSPaul Gortmaker static atomic_t count_count_start = ATOMIC_INIT(0);
23078a55fcSPaul Gortmaker static atomic_t count_count_stop = ATOMIC_INIT(0);
2439b8d525SRalf Baechle 
2539b8d525SRalf Baechle #define COUNTON 100
26db0dbd57SHuacai Chen #define NR_LOOPS 3
2739b8d525SRalf Baechle 
/*
 * Master (CPU0) side of the Count-register synchronisation handshake.
 *
 * Pairs with synchronise_count_slave() running on @cpu. The two sides
 * rendezvous NR_LOOPS times through the count_count_start/count_count_stop
 * atomics: the slave bumps count_count_start to 1, the master answers by
 * bumping it to 2 (releasing the slave), and the same dance in reverse on
 * count_count_stop closes each pass. On the final pass both sides write
 * the same value into their CP0 Count register at (nearly) the same moment.
 *
 * Runs with local interrupts disabled for the duration of the handshake.
 */
void synchronise_count_master(int cpu)
{
	int i;
	unsigned long flags;

	pr_info("Synchronize counters for CPU %u: ", cpu);

	local_irq_save(flags);

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronised and
	 * the master and slaves each set their cycle counters to a known
	 * value all at once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * two CPUs.
	 */

	for (i = 0; i < NR_LOOPS; i++) {
		/*
		 * Wait for the slave to arrive (it increments start to 1;
		 * the slave itself spins on '!= 2' until we answer below).
		 */
		while (atomic_read(&count_count_start) != 1)
			mb();
		/* Re-arm the stop barrier for this pass before releasing. */
		atomic_set(&count_count_stop, 0);
		smp_wmb();

		/* Release the slave so it can write its count register. */
		atomic_inc(&count_count_start);

		/*
		 * Sample the current timer on the middle pass (i == 1) so
		 * that the value written on the last pass is recent; the
		 * slave reads the same global on its final pass.
		 */
		if (i == 1)
			initcount = read_c0_count();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		/*
		 * Wait for slave to leave the synchronization point:
		 */
		while (atomic_read(&count_count_stop) != 1)
			mb();
		/* Re-arm the start barrier for the next pass, then release. */
		atomic_set(&count_count_start, 0);
		smp_wmb();
		atomic_inc(&count_count_stop);
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);

	local_irq_restore(flags);

	/*
	 * i386 code reported the skew here, but the
	 * count registers were almost certainly out of sync
	 * so no point in alarming people
	 */
	pr_cont("done.\n");
}
8939b8d525SRalf Baechle 
/*
 * Slave side of the Count-register synchronisation handshake.
 *
 * Pairs with synchronise_count_master(): on each of the NR_LOOPS passes
 * the slave announces itself by incrementing count_count_start, spins
 * until the master raises it to 2, and mirrors the same exchange on
 * count_count_stop. On the last pass it writes the master-sampled
 * 'initcount' into its own CP0 Count register.
 *
 * @cpu is currently unused here; the master logs the CPU number.
 * Interrupts are disabled across the handshake so the lock-step timing
 * is not perturbed.
 */
void synchronise_count_slave(int cpu)
{
	int i;
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * Not every cpu is online at the time this gets called,
	 * so we first wait for the master to say everyone is ready
	 */

	for (i = 0; i < NR_LOOPS; i++) {
		/* Announce arrival; master answers by bumping start to 2. */
		atomic_inc(&count_count_start);
		while (atomic_read(&count_count_start) != 2)
			mb();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		/* Signal that we are done; master bumps stop to 2 to release us. */
		atomic_inc(&count_count_stop);
		while (atomic_read(&count_count_stop) != 2)
			mb();
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);

	local_irq_restore(flags);
}
12239b8d525SRalf Baechle #undef NR_LOOPS
123