1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * CBE Pervasive Monitor and Debug
4  *
5  * (C) Copyright IBM Corporation 2005
6  *
7  * Authors: Maximino Aguilar (maguilar@us.ibm.com)
8  *          Michael N. Day (mnday@us.ibm.com)
9  */
10 
11 #undef DEBUG
12 
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
15 #include <linux/percpu.h>
16 #include <linux/types.h>
17 #include <linux/kallsyms.h>
18 #include <linux/pgtable.h>
19 
20 #include <asm/io.h>
21 #include <asm/machdep.h>
22 #include <asm/prom.h>
23 #include <asm/reg.h>
24 #include <asm/cell-regs.h>
25 #include <asm/cpu_has_feature.h>
26 
27 #include "pervasive.h"
28 #include "ras.h"
29 
/*
 * cbe_power_save() - put the current hardware thread into low-power pause.
 *
 * Installed as ppc_md.power_save by cbe_pervasive_init().  Arms the
 * Thread Switch Control register so a decrementer or external interrupt
 * can wake us, then atomically clears the thread-enable and runlatch
 * bits in CTRL.  The wake-up arrives as a system reset exception and is
 * dispatched by cbe_system_reset_exception() below.
 *
 * NOTE: the ordering here matters — TSC_CELL must be programmed before
 * thread execution is disabled, and MSR[EE] is only re-enabled after the
 * CTRLT write, once we are (logically) past the pause.
 */
static void cbe_power_save(void)
{
	unsigned long ctrl, thread_switch_control;

	/* Ensure our interrupt state is properly tracked */
	if (!prep_irq_for_idle())
		return;

	/* CTRL_CT tells us which hardware thread we are running on */
	ctrl = mfspr(SPRN_CTRLF);

	/* Enable DEC and EE interrupt request */
	thread_switch_control  = mfspr(SPRN_TSC_CELL);
	thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;

	/* Arm the decrementer wake-up for this thread only */
	switch (ctrl & CTRL_CT) {
	case CTRL_CT0:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
		break;
	case CTRL_CT1:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_1;
		break;
	default:
		/* Neither thread id matched — unexpected; still pause below */
		printk(KERN_WARNING "%s: unknown configuration\n",
			__func__);
		break;
	}
	mtspr(SPRN_TSC_CELL, thread_switch_control);

	/*
	 * go into low thread priority, medium priority will be
	 * restored for us after wake-up.
	 */
	HMT_low();

	/*
	 * atomically disable thread execution and runlatch.
	 * External and Decrementer exceptions are still handled when the
	 * thread is disabled but now enter in cbe_system_reset_exception()
	 */
	ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
	mtspr(SPRN_CTRLT, ctrl);

	/* Re-enable interrupts in MSR */
	__hard_irq_enable();
}
75 
76 static int cbe_system_reset_exception(struct pt_regs *regs)
77 {
78 	switch (regs->msr & SRR1_WAKEMASK) {
79 	case SRR1_WAKEDEC:
80 		set_dec(1);
81 		break;
82 	case SRR1_WAKEEE:
83 		/*
84 		 * Handle these when interrupts get re-enabled and we take
85 		 * them as regular exceptions. We are in an NMI context
86 		 * and can't handle these here.
87 		 */
88 		break;
89 	case SRR1_WAKEMT:
90 		return cbe_sysreset_hack();
91 #ifdef CONFIG_CBE_RAS
92 	case SRR1_WAKESYSERR:
93 		cbe_system_error_exception(regs);
94 		break;
95 	case SRR1_WAKETHERM:
96 		cbe_thermal_exception(regs);
97 		break;
98 #endif /* CONFIG_CBE_RAS */
99 	default:
100 		/* do system reset */
101 		return 0;
102 	}
103 	/* everything handled */
104 	return 1;
105 }
106 
107 void __init cbe_pervasive_init(void)
108 {
109 	int cpu;
110 
111 	if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
112 		return;
113 
114 	for_each_possible_cpu(cpu) {
115 		struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu);
116 		if (!regs)
117 			continue;
118 
119 		 /* Enable Pause(0) control bit */
120 		out_be64(&regs->pmcr, in_be64(&regs->pmcr) |
121 					    CBE_PMD_PAUSE_ZERO_CONTROL);
122 	}
123 
124 	ppc_md.power_save = cbe_power_save;
125 	ppc_md.system_reset_exception = cbe_system_reset_exception;
126 }
127