1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * CBE Pervasive Monitor and Debug
4  *
5  * (C) Copyright IBM Corporation 2005
6  *
7  * Authors: Maximino Aguilar (maguilar@us.ibm.com)
8  *          Michael N. Day (mnday@us.ibm.com)
9  */
10 
11 #undef DEBUG
12 
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
15 #include <linux/percpu.h>
16 #include <linux/types.h>
17 #include <linux/kallsyms.h>
18 #include <linux/pgtable.h>
19 
20 #include <asm/io.h>
21 #include <asm/machdep.h>
22 #include <asm/prom.h>
23 #include <asm/reg.h>
24 #include <asm/cell-regs.h>
25 #include <asm/cpu_has_feature.h>
26 
27 #include "pervasive.h"
28 #include "ras.h"
29 
/*
 * Idle callback installed as ppc_md.power_save: pause the current
 * hardware thread until a decrementer or external interrupt wakes it.
 * The wake-up arrives as a system reset and is decoded in
 * cbe_system_reset_exception() below.
 */
static void cbe_power_save(void)
{
	unsigned long ctrl, thread_switch_control;

	/* Ensure our interrupt state is properly tracked */
	if (!prep_irq_for_idle())
		return;

	ctrl = mfspr(SPRN_CTRLF);

	/* Enable DEC and EE interrupt request */
	thread_switch_control  = mfspr(SPRN_TSC_CELL);
	thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;

	/*
	 * Enable the decrementer wake-up only for the hardware thread
	 * we are currently running on (CTRL[CT] identifies the thread).
	 */
	switch (ctrl & CTRL_CT) {
	case CTRL_CT0:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
		break;
	case CTRL_CT1:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_1;
		break;
	default:
		printk(KERN_WARNING "%s: unknown configuration\n",
			__func__);
		break;
	}
	/* Must be programmed before thread execution is disabled below */
	mtspr(SPRN_TSC_CELL, thread_switch_control);

	/*
	 * go into low thread priority, medium priority will be
	 * restored for us after wake-up.
	 */
	HMT_low();

	/*
	 * atomically disable thread execution and runlatch.
	 * External and Decrementer exceptions are still handled when the
	 * thread is disabled but now enter in cbe_system_reset_exception()
	 */
	ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
	mtspr(SPRN_CTRLT, ctrl);

	/* Re-enable interrupts in MSR */
	__hard_irq_enable();
}
75 
/*
 * System reset handler installed as ppc_md.system_reset_exception:
 * decode the wake-up reason from SRR1 (saved in regs->msr) after a
 * pause(0) wake-up.
 *
 * Returns 1 when the event was handled here and no further action is
 * needed, 0 to let the generic code treat it as a real system reset.
 * Runs in NMI context, so DEC/EE events are only acknowledged here and
 * serviced later as regular exceptions.
 */
static int cbe_system_reset_exception(struct pt_regs *regs)
{
	switch (regs->msr & SRR1_WAKEMASK) {
	case SRR1_WAKEDEC:
		/*
		 * Re-arm the decrementer so the interrupt fires as soon
		 * as interrupts are re-enabled, then fall through — the
		 * event is otherwise treated exactly like SRR1_WAKEEE.
		 */
		set_dec(1);
		/* fall through */
	case SRR1_WAKEEE:
		/*
		 * Handle these when interrupts get re-enabled and we take
		 * them as regular exceptions. We are in an NMI context
		 * and can't handle these here.
		 */
		break;
	case SRR1_WAKEMT:
		return cbe_sysreset_hack();
#ifdef CONFIG_CBE_RAS
	case SRR1_WAKESYSERR:
		cbe_system_error_exception(regs);
		break;
	case SRR1_WAKETHERM:
		cbe_thermal_exception(regs);
		break;
#endif /* CONFIG_CBE_RAS */
	default:
		/* do system reset */
		return 0;
	}
	/* everything handled */
	return 1;
}
105 
106 void __init cbe_pervasive_init(void)
107 {
108 	int cpu;
109 
110 	if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
111 		return;
112 
113 	for_each_possible_cpu(cpu) {
114 		struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu);
115 		if (!regs)
116 			continue;
117 
118 		 /* Enable Pause(0) control bit */
119 		out_be64(&regs->pmcr, in_be64(&regs->pmcr) |
120 					    CBE_PMD_PAUSE_ZERO_CONTROL);
121 	}
122 
123 	ppc_md.power_save = cbe_power_save;
124 	ppc_md.system_reset_exception = cbe_system_reset_exception;
125 }
126