// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/atomic.h>
#include <asm/sn/types.h>
#include <asm/sn/addrs.h>
#include <asm/sn/nmi.h>
#include <asm/sn/arch.h>
#include <asm/sn/agent.h>

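/*
 * NODE_NUM_CPUS() is hard-wired to CPUS_PER_NODE here; the per-node
 * CNODE_NUM_CPUS() variant is kept around under #if 0.
 */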
#if 0
#define NODE_NUM_CPUS(n)	CNODE_NUM_CPUS(n)
#else
#define NODE_NUM_CPUS(n)	CPUS_PER_NODE
#endif

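/*
 * Raise an NMI on the cpu in the given slice of the node identified by
 * _nasid by writing that hub's per-slice PI_NMI register.
 */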
#define SEND_NMI(_nasid, _slice)	\
	REMOTE_HUB_S((_nasid),  (PI_NMI_A + ((_slice) * PI_NMI_OFFSET)), 1)

typedef unsigned long machreg_t;

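/*
 * Taken (and never released) by the first cpu into cont_nmi_dump() so
 * that only one cpu performs the dump.
 */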
static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/*
 * Let's see what else we need to do here. Set up sp, gp?
 */
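/* NMI entry point installed by install_cpu_nmi_handler(); just chain on. */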
void nmi_dump(void)
{
	void cont_nmi_dump(void);

	cont_nmi_dump();
}

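/*
 * Register nmi_dump() in this node's per-slice NMI save area so the
 * PROM can vector to it on an NMI; call_addr_c apparently holds the
 * one's complement of call_addr as a sanity check.
 */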
void install_cpu_nmi_handler(int slice)
{
	nmi_t *nmi_addr;

	nmi_addr = (nmi_t *)NMI_ADDR(get_nasid(), slice);
	if (nmi_addr->call_addr)
		return;
	nmi_addr->magic = NMI_MAGIC;
	nmi_addr->call_addr = (void *)nmi_dump;
	nmi_addr->call_addr_c =
		(void *)(~((unsigned long)(nmi_addr->call_addr)));
	nmi_addr->call_parm = 0;
}

/*
 * Copy the cpu registers which have been saved in the IP27prom format
 * into the eframe format for the node under consideration.
 */

void nmi_cpu_eframe_save(nasid_t nasid, int slice)
{
	struct reg_struct *nr;
	int		i;

	/* Get the pointer to the current cpu's register set. */
	nr = (struct reg_struct *)
		(TO_UNCAC(TO_NODE(nasid, IP27_NMI_KREGS_OFFSET)) +
		slice * IP27_NMI_KREGS_CPU_SIZE);

	pr_emerg("NMI nasid %d: slice %d\n", nasid, slice);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			pr_emerg("$%2d   :", i);
		pr_cont(" %016lx", nr->gpr[i]);

		i++;
		if ((i % 4) == 0)
			pr_cont("\n");
	}

	pr_emerg("Hi    : (value lost)\n");
	pr_emerg("Lo    : (value lost)\n");

	/*
	 * Saved cp0 registers
	 */
	pr_emerg("epc   : %016lx %pS\n", nr->epc, (void *)nr->epc);
	pr_emerg("%s\n", print_tainted());
	pr_emerg("ErrEPC: %016lx %pS\n", nr->error_epc, (void *)nr->error_epc);
	pr_emerg("ra    : %016lx %pS\n", nr->gpr[31], (void *)nr->gpr[31]);
	pr_emerg("Status: %08lx	      ", nr->sr);

	if (nr->sr & ST0_KX)
		pr_cont("KX ");
	if (nr->sr & ST0_SX)
		pr_cont("SX ");
	if (nr->sr & ST0_UX)
		pr_cont("UX ");

	switch (nr->sr & ST0_KSU) {
	case KSU_USER:
		pr_cont("USER ");
		break;
	case KSU_SUPERVISOR:
		pr_cont("SUPERVISOR ");
		break;
	case KSU_KERNEL:
		pr_cont("KERNEL ");
		break;
	default:
		pr_cont("BAD_MODE ");
		break;
	}

	if (nr->sr & ST0_ERL)
		pr_cont("ERL ");
	if (nr->sr & ST0_EXL)
		pr_cont("EXL ");
	if (nr->sr & ST0_IE)
		pr_cont("IE ");
	pr_cont("\n");

	pr_emerg("Cause : %08lx\n", nr->cause);
	pr_emerg("PrId  : %08x\n", read_c0_prid());
	pr_emerg("BadVA : %016lx\n", nr->badva);
	pr_emerg("CErr  : %016lx\n", nr->cache_err);
	pr_emerg("NMI_SR: %016lx\n", nr->nmi_sr);

	pr_emerg("\n");
}

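/*
 * Dump this slice's hub PI interrupt masks together with the (shared)
 * interrupt pending registers.
 */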
void nmi_dump_hub_irq(nasid_t nasid, int slice)
{
	u64 mask0, mask1, pend0, pend1;

	if (slice == 0) {				/* Slice A */
		mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_A);
		mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_A);
	} else {					/* Slice B */
		mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_B);
		mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_B);
	}

	pend0 = REMOTE_HUB_L(nasid, PI_INT_PEND0);
	pend1 = REMOTE_HUB_L(nasid, PI_INT_PEND1);

	pr_emerg("PI_INT_MASK0: %16llx PI_INT_MASK1: %16llx\n", mask0, mask1);
	pr_emerg("PI_INT_PEND0: %16llx PI_INT_PEND1: %16llx\n", pend0, pend1);
	pr_emerg("\n\n");
}

/*
 * Copy the cpu registers which have been saved in the IP27prom format
 * into the eframe format for every cpu slice of the node under
 * consideration.
 */
void nmi_node_eframe_save(nasid_t nasid)
{
	int slice;

	if (nasid == INVALID_NASID)
		return;

	/* Save the registers into the eframe for each cpu */
	for (slice = 0; slice < NODE_NUM_CPUS(slice); slice++) {
		nmi_cpu_eframe_save(nasid, slice);
		nmi_dump_hub_irq(nasid, slice);
	}
}

/*
 * Save the nmi cpu registers for all cpus in the system.
 */
void
nmi_eframes_save(void)
{
	nasid_t nasid;

	for_each_online_node(nasid)
		nmi_node_eframe_save(nasid);
}

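/*
 * Main NMI dump path: let a single cpu do the work, give the other
 * cpus a chance to enter their NMI handlers, then save every node's
 * eframes and reset the system.
 */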
void
cont_nmi_dump(void)
{
#ifndef REAL_NMI_SIGNAL
	static atomic_t nmied_cpus = ATOMIC_INIT(0);

	atomic_inc(&nmied_cpus);
#endif
	/*
	 * Only allow 1 cpu to proceed
	 */
	arch_spin_lock(&nmi_lock);

#ifdef REAL_NMI_SIGNAL
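	/*
	 * Note: this branch is never built in-tree; the locals it uses
	 * (i, node, cpu, n, nmied_cpus) are not declared for it, so it is
	 * kept for reference only.
	 */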
	/*
	 * Wait up to 15 seconds for the other cpus to respond to the NMI.
	 * If a cpu has not responded after 10 sec, send it 1 additional NMI.
	 * This is for 2 reasons:
	 *	- sometimes the MMSC fails to NMI all cpus.
	 *	- on a 512p SN0 system, the MMSC will only send NMIs to
	 *	  half the cpus. Unfortunately, we don't know which cpus may be
	 *	  NMIed - it depends on how the site chooses to configure it.
	 *
	 * Note: it has been measured that it takes the MMSC up to 2.3 secs to
	 * send NMIs to all cpus on a 256p system.
	 */
	for (i = 0; i < 1500; i++) {
		for_each_online_node(node)
			if (NODEPDA(node)->dump_count == 0)
				break;
		if (node == MAX_NUMNODES)
			break;
		if (i == 1000) {
			for_each_online_node(node)
				if (NODEPDA(node)->dump_count == 0) {
					cpu = cpumask_first(cpumask_of_node(node));
					for (n = 0; n < CNODE_NUM_CPUS(node); cpu++, n++) {
						CPUMASK_SETB(nmied_cpus, cpu);
						/*
						 * cputonasid, cputoslice
						 * need the kernel cpuid
						 */
						SEND_NMI((cputonasid(cpu)), (cputoslice(cpu)));
					}
				}

		}
		udelay(10000);
	}
#else
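	/* No real NMI signal; just spin until every online cpu has checked in. */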
	while (atomic_read(&nmied_cpus) != num_online_cpus())
		;
#endif

	/*
	 * Save the nmi cpu registers for all cpus in the eframe format.
	 */
	nmi_eframes_save();
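	/* Dump complete; pull the local hub's NI port reset to reset the system. */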
	LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
}