xref: /openbmc/linux/arch/parisc/kernel/processor.c (revision 78d41d9b)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *    Initial setup-routines for HP 9000 based hardware.
4  *
5  *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
6  *    Modifications for PA-RISC (C) 1999-2008 Helge Deller <deller@gmx.de>
7  *    Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
8  *    Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net>
9  *    Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org>
10  *    Modifications copyright 2001 Ryan Bradetich <rbradetich@uswest.net>
11  *
12  *    Initial PA-RISC Version: 04-23-1999 by Helge Deller
13  */
14 #include <linux/delay.h>
15 #include <linux/init.h>
16 #include <linux/mm.h>
17 #include <linux/module.h>
18 #include <linux/seq_file.h>
19 #include <linux/random.h>
20 #include <linux/slab.h>
21 #include <linux/cpu.h>
22 #include <asm/topology.h>
23 #include <asm/param.h>
24 #include <asm/cache.h>
25 #include <asm/hardware.h>	/* for register_parisc_driver() stuff */
26 #include <asm/processor.h>
27 #include <asm/page.h>
28 #include <asm/pdc.h>
29 #include <asm/smp.h>
30 #include <asm/pdcpat.h>
31 #include <asm/irq.h>		/* for struct irq_region */
32 #include <asm/parisc-device.h>
33 
34 struct system_cpuinfo_parisc boot_cpu_data __ro_after_init;
35 EXPORT_SYMBOL(boot_cpu_data);
36 #ifdef CONFIG_PA8X00
37 int _parisc_requires_coherency __ro_after_init;
38 EXPORT_SYMBOL(_parisc_requires_coherency);
39 #endif
40 
41 DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
42 
43 /*
44 **  	PARISC CPU driver - claim "device" and initialize CPU data structures.
45 **
46 ** Consolidate per CPU initialization into (mostly) one module.
47 ** The monarch CPU initializes boot_cpu_data, which shouldn't
48 ** change once the system has booted.
49 **
50 ** The callback *should* do per-instance initialization of
51 ** everything including the monarch. "Per CPU" init code in
52 ** setup.c:start_parisc() has migrated here and start_parisc()
53 ** will call register_parisc_driver(&cpu_driver) before calling do_inventory().
54 **
55 ** The goal of consolidating CPU initialization into one place is
56 ** to make sure all CPUs get initialized the same way.
57 ** The code path not shared is how PDC hands control of the CPU to the OS.
58 ** The initialization of OS data structures is the same (done below).
59 */
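
/*
** Rough boot-time flow implied by the description above (a sketch only;
** the exact call sites live in setup.c and the inventory code):
**
**	register_parisc_driver(&cpu_driver)	binds processor_probe() to
**						every HPHW_NPROC device
**	do_inventory()				walks the firmware inventory
**						and probes each CPU found
*/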
60 
61 /**
62  * init_percpu_prof - enable/setup per cpu profiling hooks.
63  * @cpunum: The processor instance.
64  *
65  * FIXME: doesn't do much yet...
66  */
67 static void
68 init_percpu_prof(unsigned long cpunum)
69 {
70 }
71 
72 
73 /**
74  * processor_probe - Determine if processor driver should claim this device.
75  * @dev: The device which has been found.
76  *
77  * Determine if processor driver should claim this chip (return 0) or not
78  * (return 1).  If so, initialize the chip and tell other partners in crime
79  * they have work to do.
80  */
81 static int __init processor_probe(struct parisc_device *dev)
82 {
83 	unsigned long txn_addr;
84 	unsigned long cpuid;
85 	struct cpuinfo_parisc *p;
86 	struct pdc_pat_cpu_num cpu_info = { };
87 
88 #ifdef CONFIG_SMP
89 	if (num_online_cpus() >= nr_cpu_ids) {
90 		printk(KERN_INFO "num_online_cpus() >= nr_cpu_ids\n");
91 		return 1;
92 	}
93 #else
94 	if (boot_cpu_data.cpu_count > 0) {
95 		printk(KERN_INFO "CONFIG_SMP=n  ignoring additional CPUs\n");
96 		return 1;
97 	}
98 #endif
99 
100 	/* Assign the logical CPU ID and update the global counter.
101 	 * May get overwritten by the PAT code below.
102 	 */
103 	cpuid = boot_cpu_data.cpu_count;
104 	txn_addr = dev->hpa.start;	/* for legacy PDC */
105 	cpu_info.cpu_num = cpu_info.cpu_loc = cpuid;
106 
107 #ifdef CONFIG_64BIT
108 	if (is_pdc_pat()) {
109 		ulong status;
110 		unsigned long bytecnt;
111 	        pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
112 
113 		pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
114 		if (!pa_pdc_cell)
115 			panic("couldn't allocate memory for PDC_PAT_CELL!");
116 
117 		status = pdc_pat_cell_module(&bytecnt, dev->pcell_loc,
118 			dev->mod_index, PA_VIEW, pa_pdc_cell);
119 
120 		BUG_ON(PDC_OK != status);
121 
122 		/* verify it's the same as what do_pat_inventory() found */
123 		BUG_ON(dev->mod_info != pa_pdc_cell->mod_info);
124 		BUG_ON(dev->pmod_loc != pa_pdc_cell->mod_location);
125 
126 		txn_addr = pa_pdc_cell->mod[0];   /* id_eid for IO sapic */
127 
128 		kfree(pa_pdc_cell);
129 
130 		/* get the cpu number */
131 		status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa.start);
132 		BUG_ON(PDC_OK != status);
133 
134 		pr_info("Logical CPU #%lu is physical cpu #%lu at location "
135 			"0x%lx with hpa %pa\n",
136 			cpuid, cpu_info.cpu_num, cpu_info.cpu_loc,
137 			&dev->hpa.start);
138 
139 #undef USE_PAT_CPUID
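/* Note: the #undef above keeps the block below compiled out; it is kept
 * only as a reference for switching to firmware (physical) CPU numbering.
 */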
140 #ifdef USE_PAT_CPUID
141 /* We need contiguous numbers for cpuid. Firmware's notion
142  * of cpuid is for physical CPUs and we just don't care yet.
143  * We'll care when we need to query PAT PDC about a CPU *after*
144  * boot time (ie shutdown a CPU from an OS perspective).
145  * boot time (i.e. shutting down a CPU from an OS perspective).
146 		if (cpu_info.cpu_num >= NR_CPUS) {
147 			printk(KERN_WARNING "IGNORING CPU at %pa,"
148 				" cpu_slot_id > NR_CPUS"
149 				" (%ld > %d)\n",
150 				&dev->hpa.start, cpu_info.cpu_num, NR_CPUS);
151 			/* Ignore CPU since it will only crash */
152 			boot_cpu_data.cpu_count--;
153 			return 1;
154 		} else {
155 			cpuid = cpu_info.cpu_num;
156 		}
157 #endif
158 	}
159 #endif
160 
161 	p = &per_cpu(cpu_data, cpuid);
162 	boot_cpu_data.cpu_count++;
163 
164 	/* initialize counters - CPU 0 gets it_value set in time_init() */
165 	if (cpuid)
166 		memset(p, 0, sizeof(struct cpuinfo_parisc));
167 
168 	p->dev = dev;		/* Save IODC data in case we need it */
169 	p->hpa = dev->hpa.start;	/* save CPU hpa */
170 	p->cpuid = cpuid;	/* save CPU id */
171 	p->txn_addr = txn_addr;	/* save CPU IRQ address */
172 	p->cpu_num = cpu_info.cpu_num;
173 	p->cpu_loc = cpu_info.cpu_loc;
174 
175 	store_cpu_topology(cpuid);
176 
177 #ifdef CONFIG_SMP
178 	/*
179 	** FIXME: review if any other initialization is clobbered
180 	**	  for boot_cpu by the above memset().
181 	*/
182 	init_percpu_prof(cpuid);
183 #endif
184 
185 	/*
186 	** CONFIG_SMP: init_smp_config() will attempt to get CPUs into
187 	** OS control. RENDEZVOUS is the default state - see the memset above.
188 	**	p->state = STATE_RENDEZVOUS;
189 	*/
190 
191 #if 0
192 	/* CPU 0 IRQ table is statically allocated/initialized */
193 	if (cpuid) {
194 		struct irqaction actions[];
195 
196 		/*
197 		** itimer and ipi IRQ handlers are statically initialized in
198 		** arch/parisc/kernel/irq.c, i.e. they don't need to be registered here.
199 		*/
200 		actions = kmalloc(sizeof(struct irqaction)*MAX_CPU_IRQ, GFP_ATOMIC);
201 		if (!actions) {
202 			/* couldn't get its own table, share with the monarch */
203 			actions = cpu_irq_actions[0];
204 		}
205 
206 		cpu_irq_actions[cpuid] = actions;
207 	}
208 #endif
209 
210 	/*
211 	 * Bring this CPU up now! (ignore bootstrap cpuid == 0)
212 	 */
213 #ifdef CONFIG_SMP
214 	if (cpuid) {
215 		set_cpu_present(cpuid, true);
216 		add_cpu(cpuid);
217 	}
218 #endif
219 
220 	return 0;
221 }
222 
223 /**
224  * collect_boot_cpu_data - Fill the boot_cpu_data structure.
225  *
226  * This function collects and stores the generic processor information
227  * in the boot_cpu_data structure.
228  */
229 void __init collect_boot_cpu_data(void)
230 {
231 	unsigned long cr16_seed;
232 	char orig_prod_num[64], current_prod_num[64], serial_no[64];
233 
234 	memset(&boot_cpu_data, 0, sizeof(boot_cpu_data));
235 
236 	cr16_seed = get_cycles();
237 	add_device_randomness(&cr16_seed, sizeof(cr16_seed));
238 
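	/* PAGE0->mem_10msec is the firmware-reported number of processor
	 * clock ticks in 10 ms, so scaling by 100 gives the clock rate in
	 * Hz (this also feeds the "cpu MHz" line in /proc/cpuinfo).
	 */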
239 	boot_cpu_data.cpu_hz = 100 * PAGE0->mem_10msec; /* Hz of this PARISC */
240 
241 	/* get CPU-Model Information... */
242 #define p ((unsigned long *)&boot_cpu_data.pdc.model)
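/* p temporarily aliases the pdc model struct as an array of unsigned longs
 * purely for the printk below; it is #undef'd right after.
 */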
243 	if (pdc_model_info(&boot_cpu_data.pdc.model) == PDC_OK) {
244 		printk(KERN_INFO
245 			"model %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
246 			p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
247 
248 		add_device_randomness(&boot_cpu_data.pdc.model,
249 			sizeof(boot_cpu_data.pdc.model));
250 	}
251 #undef p
252 
253 	if (pdc_model_versions(&boot_cpu_data.pdc.versions, 0) == PDC_OK) {
254 		printk(KERN_INFO "vers  %08lx\n",
255 			boot_cpu_data.pdc.versions);
256 
257 		add_device_randomness(&boot_cpu_data.pdc.versions,
258 			sizeof(boot_cpu_data.pdc.versions));
259 	}
260 
261 	if (pdc_model_cpuid(&boot_cpu_data.pdc.cpuid) == PDC_OK) {
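		/* The cpuid word packs the revision in bits [4:0] and the
		 * version in bits [11:5], hence the mask/shift below.
		 */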
262 		printk(KERN_INFO "CPUID vers %ld rev %ld (0x%08lx)\n",
263 			(boot_cpu_data.pdc.cpuid >> 5) & 127,
264 			boot_cpu_data.pdc.cpuid & 31,
265 			boot_cpu_data.pdc.cpuid);
266 
267 		add_device_randomness(&boot_cpu_data.pdc.cpuid,
268 			sizeof(boot_cpu_data.pdc.cpuid));
269 	}
270 
271 	if (pdc_model_capabilities(&boot_cpu_data.pdc.capabilities) == PDC_OK)
272 		printk(KERN_INFO "capabilities 0x%lx\n",
273 			boot_cpu_data.pdc.capabilities);
274 
275 	if (pdc_model_sysmodel(OS_ID_HPUX, boot_cpu_data.pdc.sys_model_name) == PDC_OK)
276 		pr_info("HP-UX model name: %s\n",
277 			boot_cpu_data.pdc.sys_model_name);
278 
279 	serial_no[0] = 0;
280 	if (pdc_model_sysmodel(OS_ID_MPEXL, serial_no) == PDC_OK &&
281 		serial_no[0])
282 		pr_info("MPE/iX model name: %s\n", serial_no);
283 
284 	dump_stack_set_arch_desc("%s", boot_cpu_data.pdc.sys_model_name);
285 
286 	boot_cpu_data.hversion =  boot_cpu_data.pdc.model.hversion;
287 	boot_cpu_data.sversion =  boot_cpu_data.pdc.model.sversion;
288 
289 	boot_cpu_data.cpu_type = parisc_get_cpu_type(boot_cpu_data.hversion);
290 	boot_cpu_data.cpu_name = cpu_name_version[boot_cpu_data.cpu_type][0];
291 	boot_cpu_data.family_name = cpu_name_version[boot_cpu_data.cpu_type][1];
292 
293 #ifdef CONFIG_PA8X00
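	/* Set for the PA8800/PA8900 cores (cpu types mako/mako2); it is
	 * exported so drivers can special-case these CPUs, e.g. via
	 * parisc_requires_coherency().
	 */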
294 	_parisc_requires_coherency = (boot_cpu_data.cpu_type == mako) ||
295 				(boot_cpu_data.cpu_type == mako2);
296 #endif
297 
298 	if (pdc_model_platform_info(orig_prod_num, current_prod_num, serial_no) == PDC_OK) {
299 		printk(KERN_INFO "product %s, original product %s, S/N: %s\n",
300 			current_prod_num[0] ? current_prod_num : "n/a",
301 			orig_prod_num, serial_no);
302 		add_device_randomness(orig_prod_num, strlen(orig_prod_num));
303 		add_device_randomness(current_prod_num, strlen(current_prod_num));
304 		add_device_randomness(serial_no, strlen(serial_no));
305 	}
306 }
307 
308 
309 /**
310  * init_per_cpu - Handle individual processor initializations.
311  * @cpunum: logical processor number.
312  *
313  * This function handles initialization for *every* CPU
314  * in the system:
315  *
316  * o Set "default" CPU width for trap handlers
317  *
318  * o Enable FP coprocessor
319  *   REVISIT: this could be done in the "code 22" trap handler.
320  *	(frowand's idea - that way we know which processes need FP
321  *	registers saved on the interrupt stack.)
322  *   NEWS FLASH: wide kernels need FP coprocessor enabled to handle
323  *	formatted printing of %lx for example (double divides I think)
324  *
325  * o Enable CPU profiling hooks.
326  */
327 int init_per_cpu(int cpunum)
328 {
329 	int ret;
330 	struct pdc_coproc_cfg coproc_cfg;
331 
332 	set_firmware_width();
333 	ret = pdc_coproc_cfg(&coproc_cfg);
334 
335 	if (ret >= 0 && coproc_cfg.ccr_functional) {
336 		mtctl(coproc_cfg.ccr_functional, 10);  /* 10 == Coprocessor Control Reg */
337 
338 		/* FWIW, FP rev/model is a more accurate way to determine
339 		** CPU type. CPU rev/model has some ambiguous cases.
340 		*/
341 		per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
342 		per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
343 
344 		if (cpunum == 0)
345 			printk(KERN_INFO  "FP[%d] enabled: Rev %ld Model %ld\n",
346 				cpunum, coproc_cfg.revision, coproc_cfg.model);
347 
348 		/*
349 		** store status register to stack (hopefully aligned)
350 		** and clear the T-bit.
351 		*/
352 		asm volatile ("fstd    %fr0,8(%sp)");
353 
354 	} else {
355 		printk(KERN_WARNING  "WARNING: No FP CoProcessor?!"
356 			" (coproc_cfg.ccr_functional == 0x%lx, expected 0xc0)\n"
357 #ifdef CONFIG_64BIT
358 			"Halting Machine - FP required\n"
359 #endif
360 			, coproc_cfg.ccr_functional);
361 #ifdef CONFIG_64BIT
362 		mdelay(100);	/* previous chars get pushed to console */
363 		panic("FP CoProc not reported");
364 #endif
365 	}
366 
367 	/* FUTURE: Enable Performance Monitor : ccr bit 0x20 */
368 	init_percpu_prof(cpunum);
369 
370 	btlb_init_per_cpu();
371 
372 	return ret;
373 }
374 
375 /*
376  * Display CPU info for all CPUs.
377  */
378 int
379 show_cpuinfo (struct seq_file *m, void *v)
380 {
381 	unsigned long cpu;
382 	char cpu_name[60], *p;
383 
384 	/* strip PA path from CPU name to not confuse lscpu */
385 	strlcpy(cpu_name, per_cpu(cpu_data, 0).dev->name, sizeof(cpu_name));
386 	p = strrchr(cpu_name, '[');
387 	if (p)
388 		*(--p) = 0;	/* cut the name just before the " [<pa path>]" suffix */
389 
390 	for_each_online_cpu(cpu) {
391 #ifdef CONFIG_SMP
392 		const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
393 
394 		if (0 == cpuinfo->hpa)
395 			continue;
396 #endif
397 		seq_printf(m, "processor\t: %lu\n"
398 				"cpu family\t: PA-RISC %s\n",
399 				 cpu, boot_cpu_data.family_name);
400 
401 		seq_printf(m, "cpu\t\t: %s\n",  boot_cpu_data.cpu_name );
402 
403 		/* cpu MHz */
404 		seq_printf(m, "cpu MHz\t\t: %d.%06d\n",
405 				 boot_cpu_data.cpu_hz / 1000000,
406 				 boot_cpu_data.cpu_hz % 1000000  );
407 
408 #ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
409 		seq_printf(m, "physical id\t: %d\n",
410 				topology_physical_package_id(cpu));
411 		seq_printf(m, "siblings\t: %d\n",
412 				cpumask_weight(topology_core_cpumask(cpu)));
413 		seq_printf(m, "core id\t\t: %d\n", topology_core_id(cpu));
414 #endif
415 
416 		seq_printf(m, "capabilities\t:");
417 		if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS32)
418 			seq_puts(m, " os32");
419 		if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS64)
420 			seq_puts(m, " os64");
421 		if (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC)
422 			seq_puts(m, " iopdir_fdc");
423 		switch (boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) {
424 		case PDC_MODEL_NVA_SUPPORTED:
425 			seq_puts(m, " nva_supported");
426 			break;
427 		case PDC_MODEL_NVA_SLOW:
428 			seq_puts(m, " nva_slow");
429 			break;
430 		case PDC_MODEL_NVA_UNSUPPORTED:
431 			seq_puts(m, " needs_equivalent_aliasing");
432 			break;
433 		}
434 		seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities);
435 
436 		seq_printf(m, "model\t\t: %s - %s\n",
437 				 boot_cpu_data.pdc.sys_model_name,
438 				 cpu_name);
439 
440 		seq_printf(m, "hversion\t: 0x%08x\n"
441 			        "sversion\t: 0x%08x\n",
442 				 boot_cpu_data.hversion,
443 				 boot_cpu_data.sversion );
444 
445 		/* print cachesize info */
446 		show_cache_info(m);
447 
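		/* BogoMIPS = loops_per_jiffy * HZ / 500000; the second term
		 * recovers the next two decimal digits for display.
		 */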
448 		seq_printf(m, "bogomips\t: %lu.%02lu\n",
449 			     loops_per_jiffy / (500000 / HZ),
450 			     loops_per_jiffy / (5000 / HZ) % 100);
451 
452 		seq_printf(m, "software id\t: %ld\n\n",
453 				boot_cpu_data.pdc.model.sw_id);
454 	}
455 	return 0;
456 }
457 
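/* Match any native processor (HPHW_NPROC); the wildcard hversion/sversion
 * IDs claim every CPU reported by the firmware inventory.
 */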
458 static const struct parisc_device_id processor_tbl[] __initconst = {
459 	{ HPHW_NPROC, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, SVERSION_ANY_ID },
460 	{ 0, }
461 };
462 
463 static struct parisc_driver cpu_driver __refdata = {
464 	.name		= "CPU",
465 	.id_table	= processor_tbl,
466 	.probe		= processor_probe
467 };
468 
469 /**
470  * processor_init - Processor initialization procedure.
471  *
472  * Register this driver.
473  */
474 void __init processor_init(void)
475 {
476 	reset_cpu_topology();
477 	register_parisc_driver(&cpu_driver);
478 }
479