xref: /openbmc/linux/arch/x86/kernel/vsmp_64.c (revision 2209fda3)
/*
 * vSMPowered(tm) systems specific initialization
 * Copyright (C) 2005 ScaleMP Inc.
 *
 * Use of this code is subject to the terms and conditions of the
 * GNU general public license version 2. See "COPYING" or
 * http://www.gnu.org/licenses/gpl.html
 *
 * Ravikiran Thirumalai <kiran@scalemp.com>,
 * Shai Fultheim <shai@scalemp.com>
 * Paravirt ops integration: Glauber de Oliveira Costa <gcosta@redhat.com>,
 *			     Ravikiran Thirumalai <kiran@scalemp.com>
 */

#include <linux/init.h>
#include <linux/pci_ids.h>
#include <linux/pci_regs.h>
#include <linux/smp.h>
#include <linux/irq.h>

#include <asm/apic.h>
#include <asm/pci-direct.h>
#include <asm/io.h>
#include <asm/paravirt.h>
#include <asm/setup.h>

#define TOPOLOGY_REGISTER_OFFSET 0x10

#if defined CONFIG_PCI && defined CONFIG_PARAVIRT_XXL
/*
 * Interrupt control on vSMPowered systems:
 * ~AC is a shadow of IF.  If IF is 'on' AC should be 'off'
 * and vice versa.
 */

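/*
 * vsmp_save_fl - paravirt save_fl.  With the scheme above, interrupts are
 * effectively enabled only when IF is set and AC is clear, so report IF as
 * clear whenever the hardware IF is clear or AC is set.
 */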
asmlinkage __visible unsigned long vsmp_save_fl(void)
{
	unsigned long flags = native_save_fl();

	if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC))
		flags &= ~X86_EFLAGS_IF;
	return flags;
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);

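/*
 * vsmp_restore_fl - paravirt restore_fl.  Keep AC as the inverse shadow of
 * the IF bit being restored before writing the flags back.
 */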
__visible void vsmp_restore_fl(unsigned long flags)
{
	if (flags & X86_EFLAGS_IF)
		flags &= ~X86_EFLAGS_AC;
	else
		flags |= X86_EFLAGS_AC;
	native_restore_fl(flags);
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);

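/* vsmp_irq_disable - paravirt irq_disable: clear IF and set AC. */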
asmlinkage __visible void vsmp_irq_disable(void)
{
	unsigned long flags = native_save_fl();

	native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);

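/* vsmp_irq_enable - paravirt irq_enable: set IF and clear AC. */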
asmlinkage __visible void vsmp_irq_enable(void)
{
	unsigned long flags = native_save_fl();

	native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);

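/*
 * vsmp_patch - paravirt patch hook: leave the irq flag operations as calls
 * to the vsmp_* handlers above (default patching) and use native patching
 * for everything else.
 */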
static unsigned __init vsmp_patch(u8 type, void *ibuf,
				  unsigned long addr, unsigned len)
{
	switch (type) {
	case PARAVIRT_PATCH(irq.irq_enable):
	case PARAVIRT_PATCH(irq.irq_disable):
	case PARAVIRT_PATCH(irq.save_fl):
	case PARAVIRT_PATCH(irq.restore_fl):
		return paravirt_patch_default(type, ibuf, addr, len);
	default:
		return native_patch(type, ibuf, addr, len);
	}
}

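/*
 * set_vsmp_pv_ops - map the vSMP CTL device's capability/control registers
 * (BAR0 of the PCI device at 00:1f.0) and, where the capability is
 * advertised, enable foundation-optimized interrupt routing and the
 * AC-based irq fastpath by installing the vsmp_* pv_ops and clearing the
 * corresponding control bits.
 */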
static void __init set_vsmp_pv_ops(void)
{
	void __iomem *address;
	unsigned int cap, ctl, cfg;

	/* set vSMP magic bits to indicate vSMP capable kernel */
	cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0);
	address = early_ioremap(cfg, 8);
	cap = readl(address);
	ctl = readl(address + 4);
	pr_info("vSMP CTL: capabilities:0x%08x  control:0x%08x\n",
		cap, ctl);

	/* If possible, let the vSMP foundation route the interrupt optimally */
#ifdef CONFIG_SMP
	if (cap & ctl & BIT(8)) {
		ctl &= ~BIT(8);

#ifdef CONFIG_PROC_FS
		/* Don't let users change irq affinity via procfs */
		no_irq_affinity = 1;
#endif
	}
#endif

	if (cap & ctl & BIT(4)) {
		/* Setup irq ops and turn on vSMP IRQ fastpath handling */
		pv_ops.irq.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
		pv_ops.irq.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
		pv_ops.irq.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
		pv_ops.irq.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
		pv_ops.init.patch = vsmp_patch;
		ctl &= ~BIT(4);
	}
	writel(ctl, address + 4);
	ctl = readl(address + 4);
	pr_info("vSMP CTL: control set to:0x%08x\n", ctl);

	early_iounmap(address, 8);
}
#else
static void __init set_vsmp_pv_ops(void)
{
}
#endif

#ifdef CONFIG_PCI
static int is_vsmp = -1;

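/*
 * detect_vsmp_box - probe, via early direct config space access, for the
 * ScaleMP vSMP CTL device at 00:1f.0 and cache the result in is_vsmp.
 */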
static void __init detect_vsmp_box(void)
{
	is_vsmp = 0;

	if (!early_pci_allowed())
		return;

	/* Check if we are running on a ScaleMP vSMPowered box */
	if (read_pci_config(0, 0x1f, 0, PCI_VENDOR_ID) ==
	     (PCI_VENDOR_ID_SCALEMP | (PCI_DEVICE_ID_SCALEMP_VSMP_CTL << 16)))
		is_vsmp = 1;
}

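/*
 * is_vsmp_box - return the cached detection result; warn once and assume a
 * non-vSMP system if called before detect_vsmp_box() has run.
 */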
static int is_vsmp_box(void)
{
	if (is_vsmp != -1)
		return is_vsmp;

	WARN_ON_ONCE(1);
	return 0;
}

#else
static void __init detect_vsmp_box(void)
{
}
static int is_vsmp_box(void)
{
	return 0;
}
#endif

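/*
 * vsmp_cap_cpus - without CONFIG_X86_VSMP, cap setup_max_cpus to the CPU
 * count of the first board as reported by the foundation's topology
 * register: bits [18:16] hold node_shift (0 decodes as 8) and the low
 * node_shift bits hold that CPU count minus one (e.g. node_shift == 4 with
 * low bits 0xb yields 12 CPUs).
 */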
static void __init vsmp_cap_cpus(void)
{
#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP)
	void __iomem *address;
	unsigned int cfg, topology, node_shift, maxcpus;

	/*
	 * CONFIG_X86_VSMP is not configured, so limit the number of CPUs to
	 * the ones present in the first board, unless explicitly overridden
	 * by setup_max_cpus
	 */
	if (setup_max_cpus != NR_CPUS)
		return;

	/* Read the vSMP Foundation topology register */
	cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0);
	address = early_ioremap(cfg + TOPOLOGY_REGISTER_OFFSET, 4);
	if (WARN_ON(!address))
		return;

	topology = readl(address);
	node_shift = (topology >> 16) & 0x7;
	if (!node_shift)
		/* The value 0 should be decoded as 8 */
		node_shift = 8;
	maxcpus = (topology & ((1 << node_shift) - 1)) + 1;

	pr_info("vSMP CTL: Capping CPUs to %d (CONFIG_X86_VSMP is unset)\n",
		maxcpus);
	setup_max_cpus = maxcpus;
	early_iounmap(address, 4);
#endif
}

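/*
 * apicid_phys_pkg_id - phys_pkg_id callback for vSMP: derive the package id
 * from the current hardware APIC id rather than the initial APIC id passed
 * in by the caller.
 */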
static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
{
	return hard_smp_processor_id() >> index_msb;
}

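/* vsmp_apic_post_init - apic post-init hook: override phys_pkg_id */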
static void vsmp_apic_post_init(void)
{
	/* need to update phys_pkg_id */
	apic->phys_pkg_id = apicid_phys_pkg_id;
}

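/*
 * vsmp_init - early platform init: detect a vSMPowered box and, if one is
 * found, hook the apic post-init callback, cap the CPU count and install
 * the paravirt irq ops.
 */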
void __init vsmp_init(void)
{
	detect_vsmp_box();
	if (!is_vsmp_box())
		return;

	x86_platform.apic_post_init = vsmp_apic_post_init;

	vsmp_cap_cpus();

	set_vsmp_pv_ops();
}