xref: /openbmc/linux/arch/arm/kernel/setup.c (revision afba8b0a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/arch/arm/kernel/setup.c
4  *
5  *  Copyright (C) 1995-2001 Russell King
6  */
7 #include <linux/efi.h>
8 #include <linux/export.h>
9 #include <linux/kernel.h>
10 #include <linux/stddef.h>
11 #include <linux/ioport.h>
12 #include <linux/delay.h>
13 #include <linux/utsname.h>
14 #include <linux/initrd.h>
15 #include <linux/console.h>
16 #include <linux/seq_file.h>
17 #include <linux/screen_info.h>
18 #include <linux/of_platform.h>
19 #include <linux/init.h>
20 #include <linux/kexec.h>
21 #include <linux/of_fdt.h>
22 #include <linux/cpu.h>
23 #include <linux/interrupt.h>
24 #include <linux/smp.h>
25 #include <linux/proc_fs.h>
26 #include <linux/memblock.h>
27 #include <linux/bug.h>
28 #include <linux/compiler.h>
29 #include <linux/sort.h>
30 #include <linux/psci.h>
31 
32 #include <asm/unified.h>
33 #include <asm/cp15.h>
34 #include <asm/cpu.h>
35 #include <asm/cputype.h>
36 #include <asm/efi.h>
37 #include <asm/elf.h>
38 #include <asm/early_ioremap.h>
39 #include <asm/fixmap.h>
40 #include <asm/procinfo.h>
41 #include <asm/psci.h>
42 #include <asm/sections.h>
43 #include <asm/setup.h>
44 #include <asm/smp_plat.h>
45 #include <asm/mach-types.h>
46 #include <asm/cacheflush.h>
47 #include <asm/cachetype.h>
48 #include <asm/tlbflush.h>
49 #include <asm/xen/hypervisor.h>
50 
51 #include <asm/prom.h>
52 #include <asm/mach/arch.h>
53 #include <asm/mach/irq.h>
54 #include <asm/mach/time.h>
55 #include <asm/system_info.h>
56 #include <asm/system_misc.h>
57 #include <asm/traps.h>
58 #include <asm/unwind.h>
59 #include <asm/memblock.h>
60 #include <asm/virt.h>
61 
62 #include "atags.h"
63 
64 
65 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* FP emulator type selected with the "fpe=" boot argument. */
66 char fpe_type[8];
67 
68 static int __init fpe_setup(char *line)
69 {
	/*
	 * NOTE(review): always copies exactly 8 bytes, so a shorter "fpe="
	 * argument pulls in trailing bytes from the command-line buffer and
	 * the result is not guaranteed to be NUL-terminated.  Consumers
	 * presumably treat fpe_type as a fixed-width field — confirm before
	 * changing this to a string copy.
	 */
70 	memcpy(fpe_type, line, 8);
71 	return 1;
72 }
73 
74 __setup("fpe=", fpe_setup);
75 #endif
76 
/* Helpers defined in arch/arm/mm and elsewhere; used during setup_arch(). */
77 extern void init_default_cache_policy(unsigned long);
78 extern void paging_init(const struct machine_desc *desc);
79 extern void early_mm_init(const struct machine_desc *);
80 extern void adjust_lowmem_bounds(void);
81 extern enum reboot_mode reboot_mode;
82 extern void setup_dma_zone(const struct machine_desc *desc);
83 
/* System identification state, exported for drivers and /proc/cpuinfo. */
84 unsigned int processor_id;
85 EXPORT_SYMBOL(processor_id);
86 unsigned int __machine_arch_type __read_mostly;
87 EXPORT_SYMBOL(__machine_arch_type);
88 unsigned int cacheid __read_mostly;
89 EXPORT_SYMBOL(cacheid);
90 
/* Physical address of the ATAGS/DTB passed in r2 by the bootloader. */
91 unsigned int __atags_pointer __initdata;
92 
93 unsigned int system_rev;
94 EXPORT_SYMBOL(system_rev);
95 
96 const char *system_serial;
97 EXPORT_SYMBOL(system_serial);
98 
99 unsigned int system_serial_low;
100 EXPORT_SYMBOL(system_serial_low);
101 
102 unsigned int system_serial_high;
103 EXPORT_SYMBOL(system_serial_high);
104 
/* ELF hardware capability bits advertised to userspace (AT_HWCAP/AT_HWCAP2). */
105 unsigned int elf_hwcap __read_mostly;
106 EXPORT_SYMBOL(elf_hwcap);
107 
108 unsigned int elf_hwcap2 __read_mostly;
109 EXPORT_SYMBOL(elf_hwcap2);
110 
111 
/*
 * Per-CPU-type dispatch tables, copied from the matched proc_info_list by
 * setup_processor() when the kernel is built for multiple CPU/TLB/cache
 * implementations.
 */
112 #ifdef MULTI_CPU
113 struct processor processor __ro_after_init;
114 #if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
115 struct processor *cpu_vtable[NR_CPUS] = {
116 	[0] = &processor,
117 };
118 #endif
119 #endif
120 #ifdef MULTI_TLB
121 struct cpu_tlb_fns cpu_tlb __ro_after_init;
122 #endif
123 #ifdef MULTI_USER
124 struct cpu_user_fns cpu_user __ro_after_init;
125 #endif
126 #ifdef MULTI_CACHE
127 struct cpu_cache_fns cpu_cache __ro_after_init;
128 #endif
129 #ifdef CONFIG_OUTER_CACHE
130 struct outer_cache_fns outer_cache __ro_after_init;
131 EXPORT_SYMBOL(outer_cache);
132 #endif
133 
134 /*
135  * Cached cpu_architecture() result for use by assembler code.
136  * C code should use the cpu_architecture() function instead of accessing this
137  * variable directly.
138  */
139 int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
140 
/*
 * Small per-mode exception stacks (three words each for IRQ, abort,
 * undefined-instruction and FIQ modes), one set per CPU; wired up by
 * cpu_init() below.  Not used on v7-M, which has no banked modes.
 */
141 struct stack {
142 	u32 irq[3];
143 	u32 abt[3];
144 	u32 und[3];
145 	u32 fiq[3];
146 } ____cacheline_aligned;
147 
148 #ifndef CONFIG_CPU_V7M
149 static struct stack stacks[NR_CPUS];
150 #endif
151 
152 char elf_platform[ELF_PLATFORM_SIZE];
153 EXPORT_SYMBOL(elf_platform);
154 
155 static const char *cpu_name;
156 static const char *machine_name;
157 static char __initdata cmd_line[COMMAND_LINE_SIZE];
158 const struct machine_desc *machine_desc __initdata;
159 
/* Run-time endianness probe: reads the first byte of a known word. */
160 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
161 #define ENDIANNESS ((char)endian_test.l)
162 
163 DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
164 
165 /*
166  * Standard memory resources
167  */
/*
 * Start/end values are filled in by request_standard_resources(); the
 * #defines below give the array entries readable names.
 */
168 static struct resource mem_res[] = {
169 	{
170 		.name = "Video RAM",
171 		.start = 0,
172 		.end = 0,
173 		.flags = IORESOURCE_MEM
174 	},
175 	{
176 		.name = "Kernel code",
177 		.start = 0,
178 		.end = 0,
179 		.flags = IORESOURCE_SYSTEM_RAM
180 	},
181 	{
182 		.name = "Kernel data",
183 		.start = 0,
184 		.end = 0,
185 		.flags = IORESOURCE_SYSTEM_RAM
186 	}
187 };
188 
189 #define video_ram   mem_res[0]
190 #define kernel_code mem_res[1]
191 #define kernel_data mem_res[2]
192 
/* Legacy PC-style parallel port I/O ranges, claimed only when the machine
 * descriptor sets the corresponding reserve_lp* flag. */
193 static struct resource io_res[] = {
194 	{
195 		.name = "reserved",
196 		.start = 0x3bc,
197 		.end = 0x3be,
198 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
199 	},
200 	{
201 		.name = "reserved",
202 		.start = 0x378,
203 		.end = 0x37f,
204 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
205 	},
206 	{
207 		.name = "reserved",
208 		.start = 0x278,
209 		.end = 0x27f,
210 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
211 	}
212 };
213 
214 #define lp0 io_res[0]
215 #define lp1 io_res[1]
216 #define lp2 io_res[2]
217 
/*
 * Human-readable architecture names, indexed by the CPU_ARCH_* value
 * returned by cpu_architecture(); printed by setup_processor().
 */
218 static const char *proc_arch[] = {
219 	"undefined/unknown",
220 	"3",
221 	"4",
222 	"4T",
223 	"5",
224 	"5T",
225 	"5TE",
226 	"5TEJ",
227 	"6TEJ",
228 	"7",
229 	"7M",
230 	"?(12)",
231 	"?(13)",
232 	"?(14)",
233 	"?(15)",
234 	"?(16)",
235 	"?(17)",
236 };
237 
238 #ifdef CONFIG_CPU_V7M
/* v7-M builds can only ever run on a v7-M core; no probing required. */
239 static int __get_cpu_architecture(void)
240 {
241 	return CPU_ARCH_ARMv7M;
242 }
243 #else
/*
 * Decode the architecture generation from the MIDR (main ID register).
 * Pre-CPUID parts are recognised by special ID patterns; newer parts
 * either encode the architecture directly in MIDR[19:16] or use the
 * "revised CPUID" scheme (MIDR[19:16] == 0xf), in which case the memory
 * model feature register distinguishes v6 from v7.
 */
244 static int __get_cpu_architecture(void)
245 {
246 	int cpu_arch;
247 
248 	if ((read_cpuid_id() & 0x0008f000) == 0) {
249 		cpu_arch = CPU_ARCH_UNKNOWN;
250 	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		/* ARM7-style ID: the "T" bit (23) distinguishes v4T from v3. */
251 		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
252 	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		/* Architecture code held directly in MIDR[18:16]. */
253 		cpu_arch = (read_cpuid_id() >> 16) & 7;
254 		if (cpu_arch)
255 			cpu_arch += CPU_ARCH_ARMv3;
256 	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
257 		/* Revised CPUID format. Read the Memory Model Feature
258 		 * Register 0 and check for VMSAv7 or PMSAv7 */
259 		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
260 		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
261 		    (mmfr0 & 0x000000f0) >= 0x00000030)
262 			cpu_arch = CPU_ARCH_ARMv7;
263 		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
264 			 (mmfr0 & 0x000000f0) == 0x00000020)
265 			cpu_arch = CPU_ARCH_ARMv6;
266 		else
267 			cpu_arch = CPU_ARCH_UNKNOWN;
268 	} else
269 		cpu_arch = CPU_ARCH_UNKNOWN;
270 
271 	return cpu_arch;
272 }
273 #endif
274 
275 int __pure cpu_architecture(void)
276 {
277 	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
278 
279 	return __cpu_architecture;
280 }
281 
/*
 * Work out whether the L1 instruction cache can alias, i.e. whether one
 * way of the cache spans more than a page so that a given physical page
 * can live at multiple cache indices.
 */
282 static int cpu_has_aliasing_icache(unsigned int arch)
283 {
284 	int aliasing_icache;
285 	unsigned int id_reg, num_sets, line_size;
286 
287 	/* PIPT caches never alias. */
288 	if (icache_is_pipt())
289 		return 0;
290 
291 	/* arch specifies the register format */
292 	switch (arch) {
293 	case CPU_ARCH_ARMv7:
		/*
		 * Select the L1 I-cache in CSSELR, then read its geometry
		 * from CCSIDR.  The isb() is required so the CCSIDR read
		 * observes the new CSSELR selection — do not reorder.
		 */
294 		set_csselr(CSSELR_ICACHE | CSSELR_L1);
295 		isb();
296 		id_reg = read_ccsidr();
297 		line_size = 4 << ((id_reg & 0x7) + 2);
298 		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		/* Aliasing is possible when one way exceeds a page. */
299 		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
300 		break;
301 	case CPU_ARCH_ARMv6:
		/* v6 cache type register: bit 11 flags an aliasing I-cache. */
302 		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
303 		break;
304 	default:
305 		/* I-cache aliases will be handled by D-cache aliasing code */
306 		aliasing_icache = 0;
307 	}
308 
309 	return aliasing_icache;
310 }
311 
/*
 * Determine the cache indexing/tagging policy from the cache type
 * register and record it in the global 'cacheid' for the cache
 * maintenance code.  Pre-v6 cores are always VIVT.
 */
312 static void __init cacheid_init(void)
313 {
314 	unsigned int arch = cpu_architecture();
315 
316 	if (arch >= CPU_ARCH_ARMv6) {
317 		unsigned int cachetype = read_cpuid_cachetype();
318 
319 		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
			/* v7-M with no caches implemented. */
320 			cacheid = 0;
321 		} else if ((cachetype & (7 << 29)) == 4 << 29) {
322 			/* ARMv7 register format */
323 			arch = CPU_ARCH_ARMv7;
324 			cacheid = CACHEID_VIPT_NONALIASING;
			/* CTR.L1Ip (bits 15:14) gives the L1 I-cache policy. */
325 			switch (cachetype & (3 << 14)) {
326 			case (1 << 14):
327 				cacheid |= CACHEID_ASID_TAGGED;
328 				break;
329 			case (3 << 14):
330 				cacheid |= CACHEID_PIPT;
331 				break;
332 			}
333 		} else {
			/* v6 register format: bit 23 flags an aliasing cache. */
334 			arch = CPU_ARCH_ARMv6;
335 			if (cachetype & (1 << 23))
336 				cacheid = CACHEID_VIPT_ALIASING;
337 			else
338 				cacheid = CACHEID_VIPT_NONALIASING;
339 		}
340 		if (cpu_has_aliasing_icache(arch))
341 			cacheid |= CACHEID_VIPT_I_ALIASING;
342 	} else {
343 		cacheid = CACHEID_VIVT;
344 	}
345 
346 	pr_info("CPU: %s data cache, %s instruction cache\n",
347 		cache_is_vivt() ? "VIVT" :
348 		cache_is_vipt_aliasing() ? "VIPT aliasing" :
349 		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
350 		cache_is_vivt() ? "VIVT" :
351 		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
352 		icache_is_vipt_aliasing() ? "VIPT aliasing" :
353 		icache_is_pipt() ? "PIPT" :
354 		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
355 }
356 
357 /*
358  * These functions re-use the assembly code in head.S, which
359  * already provide the required functionality.
360  */
361 extern struct proc_info_list *lookup_processor_type(unsigned int);
362 
363 void __init early_print(const char *str, ...)
364 {
365 	extern void printascii(const char *);
366 	char buf[256];
367 	va_list ap;
368 
369 	va_start(ap, str);
370 	vsnprintf(buf, sizeof(buf), str, ap);
371 	va_end(ap);
372 
373 #ifdef CONFIG_DEBUG_LL
374 	printascii(buf);
375 #endif
376 	printk("%s", buf);
377 }
378 
379 #ifdef CONFIG_ARM_PATCH_IDIV
380 
/*
 * Encodings of the hardware divide instructions in in-memory (possibly
 * byte-swapped) form, for both ARM and Thumb-2 kernels.  Used below to
 * patch the software division helpers at boot.
 */
381 static inline u32 __attribute_const__ sdiv_instruction(void)
382 {
383 	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
384 		/* "sdiv r0, r0, r1" */
385 		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
386 		return __opcode_to_mem_thumb32(insn);
387 	}
388 
389 	/* "sdiv r0, r0, r1" */
390 	return __opcode_to_mem_arm(0xe710f110);
391 }
392 
393 static inline u32 __attribute_const__ udiv_instruction(void)
394 {
395 	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
396 		/* "udiv r0, r0, r1" */
397 		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
398 		return __opcode_to_mem_thumb32(insn);
399 	}
400 
401 	/* "udiv r0, r0, r1" */
402 	return __opcode_to_mem_arm(0xe730f110);
403 }
404 
405 static inline u32 __attribute_const__ bx_lr_instruction(void)
406 {
407 	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
408 		/* "bx lr; nop" */
409 		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
410 		return __opcode_to_mem_thumb32(insn);
411 	}
412 
413 	/* "bx lr" */
414 	return __opcode_to_mem_arm(0xe12fff1e);
415 }
416 
/*
 * If the CPU implements hardware integer divide, overwrite the first two
 * words of the EABI software-division helpers with the real instruction
 * plus a return, then flush the I-cache over the patched range.
 */
417 static void __init patch_aeabi_idiv(void)
418 {
419 	extern void __aeabi_uidiv(void);
420 	extern void __aeabi_idiv(void);
421 	uintptr_t fn_addr;
422 	unsigned int mask;
423 
424 	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
425 	if (!(elf_hwcap & mask))
426 		return;
427 
428 	pr_info("CPU: div instructions available: patching division code\n");
429 
	/* & ~1 strips the Thumb bit from the function address. */
430 	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
	/* Empty asm hides the pointer's origin from the compiler so the
	 * stores below can't be optimised away or reordered on aliasing
	 * grounds. */
431 	asm ("" : "+g" (fn_addr));
432 	((u32 *)fn_addr)[0] = udiv_instruction();
433 	((u32 *)fn_addr)[1] = bx_lr_instruction();
434 	flush_icache_range(fn_addr, fn_addr + 8);
435 
436 	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
437 	asm ("" : "+g" (fn_addr));
438 	((u32 *)fn_addr)[0] = sdiv_instruction();
439 	((u32 *)fn_addr)[1] = bx_lr_instruction();
440 	flush_icache_range(fn_addr, fn_addr + 8);
441 }
442 
443 #else
444 static inline void patch_aeabi_idiv(void) { }
445 #endif
446 
/*
 * Derive additional HWCAP/HWCAP2 bits (divide, LPAE, v8 crypto, CRC32)
 * from the v7+ CPUID feature registers.  No-op on pre-v7 cores, whose
 * hwcaps come solely from the proc_info_list entry.
 */
447 static void __init cpuid_init_hwcaps(void)
448 {
449 	int block;
450 	u32 isar5;
451 
452 	if (cpu_architecture() < CPU_ARCH_ARMv7)
453 		return;
454 
	/* ISAR0[27:24]: divide support — 1 = Thumb only, 2 = ARM + Thumb. */
455 	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
456 	if (block >= 2)
457 		elf_hwcap |= HWCAP_IDIVA;
458 	if (block >= 1)
459 		elf_hwcap |= HWCAP_IDIVT;
460 
461 	/* LPAE implies atomic ldrd/strd instructions */
462 	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
463 	if (block >= 5)
464 		elf_hwcap |= HWCAP_LPAE;
465 
466 	/* check for supported v8 Crypto instructions */
467 	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);
468 
469 	block = cpuid_feature_extract_field(isar5, 4);
470 	if (block >= 2)
471 		elf_hwcap2 |= HWCAP2_PMULL;
472 	if (block >= 1)
473 		elf_hwcap2 |= HWCAP2_AES;
474 
475 	block = cpuid_feature_extract_field(isar5, 8);
476 	if (block >= 1)
477 		elf_hwcap2 |= HWCAP2_SHA1;
478 
479 	block = cpuid_feature_extract_field(isar5, 12);
480 	if (block >= 1)
481 		elf_hwcap2 |= HWCAP2_SHA2;
482 
483 	block = cpuid_feature_extract_field(isar5, 16);
484 	if (block >= 1)
485 		elf_hwcap2 |= HWCAP2_CRC32;
486 }
487 
/*
 * Remove hwcap bits that this particular core cannot actually honour,
 * after cpuid_init_hwcaps() has set the optimistic defaults.
 */
488 static void __init elf_hwcap_fixup(void)
489 {
490 	unsigned id = read_cpuid_id();
491 
492 	/*
493 	 * HWCAP_TLS is available only on 1136 r1p0 and later,
494 	 * see also kuser_get_tls_init.
495 	 */
496 	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
497 	    ((id >> 20) & 3) == 0) {
498 		elf_hwcap &= ~HWCAP_TLS;
499 		return;
500 	}
501 
502 	/* Verify if CPUID scheme is implemented */
503 	if ((id & 0x000f0000) != 0x000f0000)
504 		return;
505 
506 	/*
507 	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
508 	 * avoid advertising SWP; it may not be atomic with
509 	 * multiprocessing cores.
510 	 */
511 	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
512 	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
513 	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
514 		elf_hwcap &= ~HWCAP_SWP;
515 }
516 
517 /*
518  * cpu_init - initialise one CPU.
519  *
520  * cpu_init sets up the per-CPU stacks.
521  */
/* notrace: this runs before the CPU is fully set up for tracing hooks. */
522 void notrace cpu_init(void)
523 {
524 #ifndef CONFIG_CPU_V7M
525 	unsigned int cpu = smp_processor_id();
526 	struct stack *stk = &stacks[cpu];
527 
528 	if (cpu >= NR_CPUS) {
529 		pr_crit("CPU%u: bad primary CPU number\n", cpu);
530 		BUG();
531 	}
532 
533 	/*
534 	 * This only works on resume and secondary cores. For booting on the
535 	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
536 	 */
537 	set_my_cpu_offset(per_cpu_offset(cpu));
538 
539 	cpu_proc_init();
540 
541 	/*
542 	 * Define the placement constraint for the inline asm directive below.
543 	 * In Thumb-2, msr with an immediate value is not allowed.
544 	 */
545 #ifdef CONFIG_THUMB2_KERNEL
546 #define PLC	"r"
547 #else
548 #define PLC	"I"
549 #endif
550 
551 	/*
552 	 * setup stacks for re-entrant exception handlers
553 	 */
	/*
	 * Cycle through IRQ, abort, undef and FIQ modes in turn, pointing
	 * each mode's banked SP at that mode's slice of this CPU's stack
	 * structure, then drop back into SVC mode.  r14 is used as scratch
	 * and is declared clobbered.
	 */
554 	__asm__ (
555 	"msr	cpsr_c, %1\n\t"
556 	"add	r14, %0, %2\n\t"
557 	"mov	sp, r14\n\t"
558 	"msr	cpsr_c, %3\n\t"
559 	"add	r14, %0, %4\n\t"
560 	"mov	sp, r14\n\t"
561 	"msr	cpsr_c, %5\n\t"
562 	"add	r14, %0, %6\n\t"
563 	"mov	sp, r14\n\t"
564 	"msr	cpsr_c, %7\n\t"
565 	"add	r14, %0, %8\n\t"
566 	"mov	sp, r14\n\t"
567 	"msr	cpsr_c, %9"
568 	    :
569 	    : "r" (stk),
570 	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
571 	      "I" (offsetof(struct stack, irq[0])),
572 	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
573 	      "I" (offsetof(struct stack, abt[0])),
574 	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
575 	      "I" (offsetof(struct stack, und[0])),
576 	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
577 	      "I" (offsetof(struct stack, fiq[0])),
578 	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
579 	    : "r14");
580 #endif
581 }
582 
/* Logical-to-physical CPU map, filled from MPIDR values during boot. */
583 u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
584 
/*
 * Record the booting CPU's affinity-0 id as logical CPU 0 and swap it
 * with whichever logical slot it would otherwise occupy, so that the
 * boot CPU is always logical 0.
 */
585 void __init smp_setup_processor_id(void)
586 {
587 	int i;
588 	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
589 	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
590 
591 	cpu_logical_map(0) = cpu;
592 	for (i = 1; i < nr_cpu_ids; ++i)
593 		cpu_logical_map(i) = i == cpu ? 0 : i;
594 
595 	/*
596 	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
597 	 * using percpu variable early, for example, lockdep will
598 	 * access percpu variable inside lock_release
599 	 */
600 	set_my_cpu_offset(0);
601 
602 	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
603 }
604 
/* Collision-free MPIDR hash parameters, consumed by suspend/resume asm. */
605 struct mpidr_hash mpidr_hash;
606 #ifdef CONFIG_SMP
607 /**
608  * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
609  *			  level in order to build a linear index from an
610  *			  MPIDR value. Resulting algorithm is a collision
611  *			  free hash carried out through shifting and ORing
612  */
613 static void __init smp_build_mpidr_hash(void)
614 {
615 	u32 i, affinity;
616 	u32 fs[3], bits[3], ls, mask = 0;
617 	/*
618 	 * Pre-scan the list of MPIDRS and filter out bits that do
619 	 * not contribute to affinity levels, ie they never toggle.
620 	 */
621 	for_each_possible_cpu(i)
622 		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
623 	pr_debug("mask of set bits 0x%x\n", mask);
624 	/*
625 	 * Find and stash the last and first bit set at all affinity levels to
626 	 * check how many bits are required to represent them.
627 	 */
628 	for (i = 0; i < 3; i++) {
629 		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
630 		/*
631 		 * Find the MSB bit and LSB bits position
632 		 * to determine how many bits are required
633 		 * to express the affinity level.
634 		 */
635 		ls = fls(affinity);
636 		fs[i] = affinity ? ffs(affinity) - 1 : 0;
637 		bits[i] = ls - fs[i];
638 	}
639 	/*
640 	 * An index can be created from the MPIDR by isolating the
641 	 * significant bits at each affinity level and by shifting
642 	 * them in order to compress the 24 bits values space to a
643 	 * compressed set of values. This is equivalent to hashing
644 	 * the MPIDR through shifting and ORing. It is a collision free
645 	 * hash though not minimal since some levels might contain a number
646 	 * of CPUs that is not an exact power of 2 and their bit
647 	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
648 	 */
	/* Each level's shift packs its significant bits directly above the
	 * bits already consumed by the lower levels. */
649 	mpidr_hash.shift_aff[0] = fs[0];
650 	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
651 	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
652 						(bits[1] + bits[0]);
653 	mpidr_hash.mask = mask;
654 	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
655 	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
656 				mpidr_hash.shift_aff[0],
657 				mpidr_hash.shift_aff[1],
658 				mpidr_hash.shift_aff[2],
659 				mpidr_hash.mask,
660 				mpidr_hash.bits);
661 	/*
662 	 * 4x is an arbitrary value used to warn on a hash table much bigger
663 	 * than expected on most systems.
664 	 */
665 	if (mpidr_hash_size() > 4 * num_possible_cpus())
666 		pr_warn("Large number of MPIDR hash buckets detected\n");
	/* Push the parameters to memory: resume code reads them with the
	 * MMU/caches off. */
667 	sync_cache_w(&mpidr_hash);
668 }
669 #endif
670 
671 /*
672  * locate processor in the list of supported processor types.  The linker
673  * builds this table for us from the entries in arch/arm/mm/proc-*.S
674  */
675 struct proc_info_list *lookup_processor(u32 midr)
676 {
677 	struct proc_info_list *list = lookup_processor_type(midr);
678 
679 	if (!list) {
680 		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
681 		       smp_processor_id(), midr);
682 		while (1)
683 		/* can't use cpu_relax() here as it may require MMU setup */;
684 	}
685 
686 	return list;
687 }
688 
/*
 * Identify the boot CPU from its MIDR, install the matching per-CPU-type
 * dispatch tables, populate utsname/elf_platform/hwcaps, and perform the
 * remaining per-CPU initialisation (cache id, exception stacks).
 */
689 static void __init setup_processor(void)
690 {
691 	unsigned int midr = read_cpuid_id();
	/* lookup_processor() never returns NULL — it halts on no match. */
692 	struct proc_info_list *list = lookup_processor(midr);
693 
694 	cpu_name = list->cpu_name;
695 	__cpu_architecture = __get_cpu_architecture();
696 
	/* Copy the matched implementation's function tables before anything
	 * below can call through them. */
697 	init_proc_vtable(list->proc);
698 #ifdef MULTI_TLB
699 	cpu_tlb = *list->tlb;
700 #endif
701 #ifdef MULTI_USER
702 	cpu_user = *list->user;
703 #endif
704 #ifdef MULTI_CACHE
705 	cpu_cache = *list->cache;
706 #endif
707 
708 	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
709 		list->cpu_name, midr, midr & 15,
710 		proc_arch[cpu_architecture()], get_cr());
711 
	/* ENDIANNESS appends 'l' or 'b' to the machine/platform strings. */
712 	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
713 		 list->arch_name, ENDIANNESS);
714 	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
715 		 list->elf_name, ENDIANNESS);
716 	elf_hwcap = list->elf_hwcap;
717 
718 	cpuid_init_hwcaps();
719 	patch_aeabi_idiv();
720 
721 #ifndef CONFIG_ARM_THUMB
722 	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
723 #endif
724 #ifdef CONFIG_MMU
725 	init_default_cache_policy(list->__cpu_mm_mmu_flags);
726 #endif
727 	erratum_a15_798181_init();
728 
729 	elf_hwcap_fixup();
730 
731 	cacheid_init();
732 	cpu_init();
733 }
734 
735 void __init dump_machine_table(void)
736 {
737 	const struct machine_desc *p;
738 
739 	early_print("Available machine support:\n\nID (hex)\tNAME\n");
740 	for_each_machine_desc(p)
741 		early_print("%08x\t%s\n", p->nr, p->name);
742 
743 	early_print("\nPlease check your kernel config and/or bootloader.\n");
744 
745 	while (true)
746 		/* can't use cpu_relax() here as it may require MMU setup */;
747 }
748 
/*
 * arm_add_memory - register a RAM bank with memblock.
 * @start: physical start address (any alignment)
 * @size:  bank size in bytes
 *
 * Rounds the start up and the size down to page boundaries, clips the
 * bank against the 32-bit physical limit (non-LPAE) and PHYS_OFFSET,
 * and adds the result to memblock.  Returns 0 on success or -EINVAL if
 * nothing usable remains.
 */
749 int __init arm_add_memory(u64 start, u64 size)
750 {
751 	u64 aligned_start;
752 
753 	/*
754 	 * Ensure that start/size are aligned to a page boundary.
755 	 * Size is rounded down, start is rounded up.
756 	 */
757 	aligned_start = PAGE_ALIGN(start);
	/* If rounding start up consumed the whole bank, drop it. */
758 	if (aligned_start > start + size)
759 		size = 0;
760 	else
761 		size -= aligned_start - start;
762 
763 #ifndef CONFIG_PHYS_ADDR_T_64BIT
764 	if (aligned_start > ULONG_MAX) {
765 		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
766 			(long long)start);
767 		return -EINVAL;
768 	}
769 
770 	if (aligned_start + size > ULONG_MAX) {
771 		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
772 			(long long)start);
773 		/*
774 		 * To ensure bank->start + bank->size is representable in
775 		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
776 		 * This means we lose a page after masking.
777 		 */
778 		size = ULONG_MAX - aligned_start;
779 	}
780 #endif
781 
	/* Trim away anything below PHYS_OFFSET; the kernel can't use it. */
782 	if (aligned_start < PHYS_OFFSET) {
783 		if (aligned_start + size <= PHYS_OFFSET) {
784 			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
785 				aligned_start, aligned_start + size);
786 			return -EINVAL;
787 		}
788 
789 		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
790 			aligned_start, (u64)PHYS_OFFSET);
791 
792 		size -= PHYS_OFFSET - aligned_start;
793 		aligned_start = PHYS_OFFSET;
794 	}
795 
796 	start = aligned_start;
	/* Round size down to a whole number of pages. */
797 	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
798 
799 	/*
800 	 * Check whether this memory region has non-zero size or
801 	 * invalid node number.
802 	 */
803 	if (size == 0)
804 		return -EINVAL;
805 
806 	memblock_add(start, size);
807 	return 0;
808 }
809 
810 /*
811  * Pick out the memory size.  We look for mem=size@start,
812  * where start and size are "size[KkMm]"
813  */
814 
815 static int __init early_mem(char *p)
816 {
	/* First "mem=" seen wipes the firmware-provided memory map so the
	 * user's banks fully replace it; later ones just append. */
817 	static int usermem __initdata = 0;
818 	u64 size;
819 	u64 start;
820 	char *endp;
821 
822 	/*
823 	 * If the user specifies memory size, we
824 	 * blow away any automatically generated
825 	 * size.
826 	 */
827 	if (usermem == 0) {
828 		usermem = 1;
829 		memblock_remove(memblock_start_of_DRAM(),
830 			memblock_end_of_DRAM() - memblock_start_of_DRAM());
831 	}
832 
	/* Default base is PHYS_OFFSET unless an "@start" suffix is given. */
833 	start = PHYS_OFFSET;
834 	size  = memparse(p, &endp);
835 	if (*endp == '@')
836 		start = memparse(endp + 1, NULL);
837 
838 	arm_add_memory(start, size);
839 
840 	return 0;
841 }
842 early_param("mem", early_mem);
843 
/*
 * Publish the standard resource tree: one "System RAM" entry per memblock
 * range (plus boot-alias ranges for kexec), the kernel code/data spans
 * nested inside their containing RAM entry, optional video RAM, and the
 * legacy parallel-port I/O ranges the machine descriptor asks for.
 */
844 static void __init request_standard_resources(const struct machine_desc *mdesc)
845 {
846 	phys_addr_t start, end, res_end;
847 	struct resource *res;
848 	u64 i;
849 
850 	kernel_code.start   = virt_to_phys(_text);
851 	kernel_code.end     = virt_to_phys(__init_begin - 1);
852 	kernel_data.start   = virt_to_phys(_sdata);
853 	kernel_data.end     = virt_to_phys(_end - 1);
854 
855 	for_each_mem_range(i, &start, &end) {
856 		unsigned long boot_alias_start;
857 
858 		/*
859 		 * In memblock, end points to the first byte after the
860 		 * range while in resources, end points to the last byte in
861 		 * the range.
862 		 */
863 		res_end = end - 1;
864 
865 		/*
866 		 * Some systems have a special memory alias which is only
867 		 * used for booting.  We need to advertise this region to
868 		 * kexec-tools so they know where bootable RAM is located.
869 		 */
870 		boot_alias_start = phys_to_idmap(start);
871 		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
872 			res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
873 			if (!res)
874 				panic("%s: Failed to allocate %zu bytes\n",
875 				      __func__, sizeof(*res));
876 			res->name = "System RAM (boot alias)";
877 			res->start = boot_alias_start;
878 			res->end = phys_to_idmap(res_end);
879 			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
880 			request_resource(&iomem_resource, res);
881 		}
882 
883 		res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
884 		if (!res)
885 			panic("%s: Failed to allocate %zu bytes\n", __func__,
886 			      sizeof(*res));
887 		res->name  = "System RAM";
888 		res->start = start;
889 		res->end = res_end;
890 		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
891 
892 		request_resource(&iomem_resource, res);
893 
		/* Nest the kernel image spans under the RAM range that
		 * contains them. */
894 		if (kernel_code.start >= res->start &&
895 		    kernel_code.end <= res->end)
896 			request_resource(res, &kernel_code);
897 		if (kernel_data.start >= res->start &&
898 		    kernel_data.end <= res->end)
899 			request_resource(res, &kernel_data);
900 	}
901 
902 	if (mdesc->video_start) {
903 		video_ram.start = mdesc->video_start;
904 		video_ram.end   = mdesc->video_end;
905 		request_resource(&iomem_resource, &video_ram);
906 	}
907 
908 	/*
909 	 * Some machines don't have the possibility of ever
910 	 * possessing lp0, lp1 or lp2
911 	 */
912 	if (mdesc->reserve_lp0)
913 		request_resource(&ioport_resource, &lp0);
914 	if (mdesc->reserve_lp1)
915 		request_resource(&ioport_resource, &lp1);
916 	if (mdesc->reserve_lp2)
917 		request_resource(&ioport_resource, &lp2);
918 }
919 
/* Default console geometry for VGA/dummy console and EFI boot. */
920 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
921     defined(CONFIG_EFI)
922 struct screen_info screen_info = {
923  .orig_video_lines	= 30,
924  .orig_video_cols	= 80,
925  .orig_video_mode	= 0,
926  .orig_video_ega_bx	= 0,
927  .orig_video_isVGA	= 1,
928  .orig_video_points	= 8
929 };
930 #endif
931 
932 static int __init customize_machine(void)
933 {
934 	/*
935 	 * customizes platform devices, or adds new ones
936 	 * On DT based machines, we fall back to populating the
937 	 * machine from the device tree, if no callback is provided,
938 	 * otherwise we would always need an init_machine callback.
939 	 */
940 	if (machine_desc->init_machine)
941 		machine_desc->init_machine();
942 
943 	return 0;
944 }
945 arch_initcall(customize_machine);
946 
947 static int __init init_machine_late(void)
948 {
949 	struct device_node *root;
950 	int ret;
951 
952 	if (machine_desc->init_late)
953 		machine_desc->init_late();
954 
955 	root = of_find_node_by_path("/");
956 	if (root) {
957 		ret = of_property_read_string(root, "serial-number",
958 					      &system_serial);
959 		if (ret)
960 			system_serial = NULL;
961 	}
962 
963 	if (!system_serial)
964 		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
965 					  system_serial_high,
966 					  system_serial_low);
967 
968 	return 0;
969 }
970 late_initcall(init_machine_late);
971 
972 #ifdef CONFIG_KEXEC
973 /*
974  * The crash region must be aligned to 128MB to avoid
975  * zImage relocating below the reserved region.
976  */
977 #define CRASH_ALIGN	(128 << 20)
978 
/* Total low memory in bytes, derived from the pfn bounds. */
979 static inline unsigned long long get_total_mem(void)
980 {
981 	unsigned long total;
982 
983 	total = max_low_pfn - min_low_pfn;
984 	return total << PAGE_SHIFT;
985 }
986 
987 /**
988  * reserve_crashkernel() - reserves memory area for crash kernel
989  *
990  * This function reserves memory area given in "crashkernel=" kernel command
991  * line parameter. The memory reserved is used by a dump capture kernel when
992  * primary kernel is crashing.
993  */
994 static void __init reserve_crashkernel(void)
995 {
996 	unsigned long long crash_size, crash_base;
997 	unsigned long long total_mem;
998 	int ret;
999 
1000 	total_mem = get_total_mem();
1001 	ret = parse_crashkernel(boot_command_line, total_mem,
1002 				&crash_size, &crash_base);
1003 	if (ret)
1004 		return;
1005 
	/* crash_base is unsigned, so this is effectively "== 0", i.e. no
	 * base given: pick one ourselves below the idmap/lowmem limit. */
1006 	if (crash_base <= 0) {
1007 		unsigned long long crash_max = idmap_to_phys((u32)~0);
1008 		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
1009 		if (crash_max > lowmem_max)
1010 			crash_max = lowmem_max;
1011 		crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
1012 						    crash_size, CRASH_ALIGN);
1013 		if (!crash_base) {
1014 			pr_err("crashkernel reservation failed - No suitable area found.\n");
1015 			return;
1016 		}
1017 	} else {
1018 		unsigned long long start;
1019 
		/* User supplied a base: verify that exact range is free. */
1020 		start = memblock_find_in_range(crash_base,
1021 					       crash_base + crash_size,
1022 					       crash_size, SECTION_SIZE);
1023 		if (start != crash_base) {
1024 			pr_err("crashkernel reservation failed - memory is in use.\n");
1025 			return;
1026 		}
1027 	}
1028 
1029 	ret = memblock_reserve(crash_base, crash_size);
1030 	if (ret < 0) {
1031 		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
1032 			(unsigned long)crash_base);
1033 		return;
1034 	}
1035 
1036 	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
1037 		(unsigned long)(crash_size >> 20),
1038 		(unsigned long)(crash_base >> 20),
1039 		(unsigned long)(total_mem >> 20));
1040 
1041 	/* The crashk resource must always be located in normal mem */
1042 	crashk_res.start = crash_base;
1043 	crashk_res.end = crash_base + crash_size - 1;
1044 	insert_resource(&iomem_resource, &crashk_res);
1045 
1046 	if (arm_has_idmap_alias()) {
1047 		/*
1048 		 * If we have a special RAM alias for use at boot, we
1049 		 * need to advertise to kexec tools where the alias is.
1050 		 */
1051 		static struct resource crashk_boot_res = {
1052 			.name = "Crash kernel (boot alias)",
1053 			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
1054 		};
1055 
1056 		crashk_boot_res.start = phys_to_idmap(crash_base);
1057 		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
1058 		insert_resource(&iomem_resource, &crashk_boot_res);
1059 	}
1060 }
1061 #else
1062 static inline void reserve_crashkernel(void) {}
1063 #endif /* CONFIG_KEXEC */
1064 
1065 void __init hyp_mode_check(void)
1066 {
1067 #ifdef CONFIG_ARM_VIRT_EXT
1068 	sync_boot_mode();
1069 
1070 	if (is_hyp_mode_available()) {
1071 		pr_info("CPU: All CPU(s) started in HYP mode.\n");
1072 		pr_info("CPU: Virtualization extensions available.\n");
1073 	} else if (is_hyp_mode_mismatched()) {
1074 		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
1075 			__boot_cpu_mode & MODE_MASK);
1076 		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
1077 	} else
1078 		pr_info("CPU: All CPU(s) started in SVC mode.\n");
1079 #endif
1080 }
1081 
/*
 * setup_arch - one-time, boot-CPU architecture initialization.
 *
 * Called from start_kernel() before memory management, SMP and drivers
 * come up.  The ordering of the calls below is significant and must not
 * be changed casually: e.g. the fixmap/early-ioremap setup must precede
 * parse_early_param(), and lowmem bounds are recomputed after
 * arm_memblock_init() may have removed memory.
 *
 * @cmdline_p: out-parameter, set to point at the working copy of the
 *             kernel command line.
 */
void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	/*
	 * Identify the machine: try the DTB (address handed over in r2)
	 * first, then fall back to legacy ATAGS + machine number (r1).
	 */
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	if (!mdesc) {
		/*
		 * Nothing usable from the bootloader.  Dump what it passed
		 * and the machine table; dump_machine_table() must not
		 * return here, since mdesc is NULL and is dereferenced
		 * unconditionally below.
		 */
		early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
		early_print("  r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
			    __atags_pointer);
		if (__atags_pointer)
			early_print("  r2[]=%*ph\n", 16,
				    phys_to_virt(__atags_pointer));
		dump_machine_table();
	}

	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	/* Let the board override the default reboot behaviour. */
	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	/* Describe the kernel image layout to the initial mm. */
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	/* Fixmap before early ioremap; both before early_param handlers run. */
	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_mm_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	efi_init();
	/*
	 * Make sure the calculation for lowmem/highmem is set appropriately
	 * before reserving/allocating any memory
	 */
	adjust_lowmem_bounds();
	arm_memblock_init(mdesc);
	/* Memory may have been removed so recalculate the bounds. */
	adjust_lowmem_bounds();

	early_ioremap_reset();

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		/*
		 * SMP ops selection: the board's smp_init() hook wins;
		 * otherwise prefer PSCI, then the machine descriptor's
		 * static ops.
		 */
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	/*
	 * UP-only here; NOTE(review): on SMP this check presumably runs
	 * later, once the secondaries have recorded their boot mode.
	 */
	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#endif
#endif

	/* Board-specific early init hook, last of all. */
	if (mdesc->init_early)
		mdesc->init_early();
}
1179 
1180 
1181 static int __init topology_init(void)
1182 {
1183 	int cpu;
1184 
1185 	for_each_possible_cpu(cpu) {
1186 		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
1187 		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
1188 		register_cpu(&cpuinfo->cpu, cpu);
1189 	}
1190 
1191 	return 0;
1192 }
1193 subsys_initcall(topology_init);
1194 
#ifdef CONFIG_HAVE_PROC_CPU
/* Create the /proc/cpu directory (populated elsewhere). */
static int __init proc_cpu_init(void)
{
	if (!proc_mkdir("cpu", NULL))
		return -ENOMEM;

	return 0;
}
fs_initcall(proc_cpu_init);
#endif
1207 
/*
 * Names for the HWCAP feature bits printed in /proc/cpuinfo.
 * The array index is the bit number tested against elf_hwcap in
 * c_show() (via 1 << j), so entries must NOT be reordered or removed;
 * new names are appended before the NULL terminator.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};
1233 
/*
 * Names for the HWCAP2 feature bits, indexed by bit number in
 * elf_hwcap2 (see c_show()).  Same ordering rules as hwcap_str:
 * never reorder; append new entries before the NULL terminator.
 */
static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
1242 
/*
 * seq_file show routine for /proc/cpuinfo: one stanza per online CPU,
 * followed by the board-level Hardware/Revision/Serial lines.  The
 * output format is parsed by userspace (glibc among others), so the
 * labels and layout must stay stable.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		/* SMP keeps a per-CPU copy of the CPU ID register; UP reads it live. */
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		/* String index j doubles as the feature bit number. */
		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		/* Decode the CPU ID register field by field. */
		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}
1308 
1309 static void *c_start(struct seq_file *m, loff_t *pos)
1310 {
1311 	return *pos < 1 ? (void *)1 : NULL;
1312 }
1313 
1314 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1315 {
1316 	++*pos;
1317 	return NULL;
1318 }
1319 
/* seq_file stop: c_start() allocated nothing, so nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}
1323 
/* seq_file operations backing /proc/cpuinfo (wired up in fs/proc). */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
1330