xref: /openbmc/linux/arch/sh/kernel/cpu/init.c (revision 4b4193256c8d3bc3a5397b5cd9494c2ad386317d)
1b0a148f8SKuninori Morimoto // SPDX-License-Identifier: GPL-2.0
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * arch/sh/kernel/cpu/init.c
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  * CPU init code
61da177e4SLinus Torvalds  *
77dd6662aSPaul Mundt  * Copyright (C) 2002 - 2009  Paul Mundt
8b638d0b9SRichard Curnow  * Copyright (C) 2003  Richard Curnow
91da177e4SLinus Torvalds  */
101da177e4SLinus Torvalds #include <linux/init.h>
111da177e4SLinus Torvalds #include <linux/kernel.h>
12aec5e0e1SPaul Mundt #include <linux/mm.h>
13cd01204bSPaul Mundt #include <linux/log2.h>
14aec5e0e1SPaul Mundt #include <asm/mmu_context.h>
151da177e4SLinus Torvalds #include <asm/processor.h>
167c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
17f3c25758SPaul Mundt #include <asm/page.h>
181da177e4SLinus Torvalds #include <asm/cacheflush.h>
191da177e4SLinus Torvalds #include <asm/cache.h>
20cd01204bSPaul Mundt #include <asm/elf.h>
211da177e4SLinus Torvalds #include <asm/io.h>
22aba1030aSPaul Mundt #include <asm/smp.h>
2349f3bfe9SPaul Mundt #include <asm/sh_bios.h>
24e839ca52SDavid Howells #include <asm/setup.h>
251da177e4SLinus Torvalds 
260ea820cfSPaul Mundt #ifdef CONFIG_SH_FPU
270ea820cfSPaul Mundt #define cpu_has_fpu	1
280ea820cfSPaul Mundt #else
290ea820cfSPaul Mundt #define cpu_has_fpu	0
300ea820cfSPaul Mundt #endif
310ea820cfSPaul Mundt 
320ea820cfSPaul Mundt #ifdef CONFIG_SH_DSP
330ea820cfSPaul Mundt #define cpu_has_dsp	1
340ea820cfSPaul Mundt #else
350ea820cfSPaul Mundt #define cpu_has_dsp	0
361da177e4SLinus Torvalds #endif
371da177e4SLinus Torvalds 
/*
 * Generic wrapper for command line arguments to disable on-chip
 * peripherals (nofpu, nodsp, and so forth).
 *
 * onchip_setup(x) expands to:
 *   - an x##_disabled flag, defaulting to "disabled" when the kernel
 *     was built without support for x (cpu_has_##x == 0);
 *   - an __setup() handler for the "no<x>" boot parameter that forces
 *     the flag on.  Returning 1 tells the early param parser the
 *     option was consumed.
 */
#define onchip_setup(x)					\
static int x##_disabled = !cpu_has_##x;			\
							\
static int x##_setup(char *opts)			\
{							\
	x##_disabled = 1;				\
	return 1;					\
}							\
__setup("no" __stringify(x), x##_setup);

onchip_setup(fpu);	/* boot parameter "nofpu" */
onchip_setup(dsp);	/* boot parameter "nodsp" */
541da177e4SLinus Torvalds 
#ifdef CONFIG_SPECULATIVE_EXECUTION
#define CPUOPM		0xff2f0000
#define CPUOPM_RABD	(1 << 5)

/*
 * Knock the RABD bit out of the CPU operation mode register so that
 * speculative execution support is configured the way we want it.
 */
static void speculative_execution_init(void)
{
	unsigned long opm;

	/* Read-modify-write CPUOPM with RABD cleared. */
	opm = __raw_readl(CPUOPM);
	opm &= ~CPUOPM_RABD;
	__raw_writel(opm, CPUOPM);

	/* Read back to make sure the write has taken effect. */
	(void)__raw_readl(CPUOPM);
	ctrl_barrier();
}
#else
#define speculative_execution_init()	do { } while (0)
#endif
7145ed285bSPaul Mundt 
#ifdef CONFIG_CPU_SH4A
#define EXPMASK			0xff2f0004
#define EXPMASK_RTEDS		(1 << 0)
#define EXPMASK_BRDSSLP		(1 << 1)
#define EXPMASK_MMCAW		(1 << 4)

/*
 * Tighten up the EXPMASK register on SH-4A parts.
 *
 * Future proofing: disable support for the slottable sleep
 * instruction, for non-nop instructions in the rte delay slot, and
 * for associative writes to the memory-mapped cache array.
 */
static void expmask_init(void)
{
	unsigned long mask;

	mask = __raw_readl(EXPMASK);
	mask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP | EXPMASK_MMCAW);
	__raw_writel(mask, EXPMASK);

	ctrl_barrier();
}
#else
#define expmask_init()	do { } while (0)
#endif
977dd6662aSPaul Mundt 
/*
 * 2nd-level cache init.
 *
 * Default no-op; weak linkage lets CPU subtypes that actually have an
 * L2 override this with their own setup.  Called from cache_init()
 * just before the new CCR value is written.
 */
void __attribute__ ((weak)) l2_cache_init(void)
{
}
102fab88d9fSKuninori Morimoto 
/*
 * Generic first-level cache init.
 *
 * Purges anything the bootloader may have left dirty in the D-cache,
 * then writes a fresh CCR value that enables and invalidates the
 * caches.  Runs uncached (jump_to_uncached/back_to_cached) since it
 * rewrites CCR while the cache state is in flux.
 */
#if !defined(CONFIG_CPU_J2)
static void cache_init(void)
{
	unsigned long ccr, flags;

	jump_to_uncached();
	ccr = __raw_readl(SH_CCR);

	/*
	 * At this point we don't know whether the cache is enabled or not - a
	 * bootloader may have enabled it.  There are at least 2 things that
	 * could be dirty in the cache at this point:
	 * 1. kernel command line set up by boot loader
	 * 2. spilled registers from the prolog of this function
	 * => before re-initialising the cache, we must do a purge of the whole
	 * cache out to memory for safety.  As long as nothing is spilled
	 * during the loop to lines that have already been done, this is safe.
	 * - RPC
	 */
	if (ccr & CCR_CACHE_ENABLE) {
		unsigned long ways, waysize, addrstart;

		waysize = current_cpu_data.dcache.sets;

#ifdef CCR_CACHE_ORA
		/*
		 * If the OC is already in RAM mode, we only have
		 * half of the entries to flush..
		 */
		if (ccr & CCR_CACHE_ORA)
			waysize >>= 1;
#endif

		waysize <<= current_cpu_data.dcache.entry_shift;

#ifdef CCR_CACHE_EMODE
		/* If EMODE is not set, we only have 1 way to flush. */
		if (!(ccr & CCR_CACHE_EMODE))
			ways = 1;
		else
#endif
			ways = current_cpu_data.dcache.ways;

		/*
		 * Walk every line of every way through the memory-mapped
		 * OC address array; storing 0 there purges the line
		 * (NOTE(review): per the SH cache architecture — confirm
		 * against the CPU manual for new subtypes).
		 */
		addrstart = CACHE_OC_ADDRESS_ARRAY;
		do {
			unsigned long addr;

			for (addr = addrstart;
			     addr < addrstart + waysize;
			     addr += current_cpu_data.dcache.linesz)
				__raw_writel(0, addr);

			addrstart += current_cpu_data.dcache.way_incr;
		} while (--ways);
	}

	/*
	 * Default CCR values .. enable the caches
	 * and invalidate them immediately..
	 */
	flags = CCR_CACHE_ENABLE | CCR_CACHE_INVALIDATE;

#ifdef CCR_CACHE_EMODE
	/* Force EMODE if possible */
	if (current_cpu_data.dcache.ways > 1)
		flags |= CCR_CACHE_EMODE;
	else
		flags &= ~CCR_CACHE_EMODE;
#endif

	/* Write policy is a compile-time choice. */
#if defined(CONFIG_CACHE_WRITETHROUGH)
	/* Write-through */
	flags |= CCR_CACHE_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
	/* Write-back */
	flags |= CCR_CACHE_CB;
#else
	/* Off */
	flags &= ~CCR_CACHE_ENABLE;
#endif

	/* Give any L2 a chance to configure itself before CCR goes live. */
	l2_cache_init();

	__raw_writel(flags, SH_CCR);
	back_to_cached();
}
#else
#define cache_init()	do { } while (0)
#endif
1951da177e4SLinus Torvalds 
196cd01204bSPaul Mundt #define CSHAPE(totalsize, linesize, assoc) \
197cd01204bSPaul Mundt 	((totalsize & ~0xff) | (linesize << 4) | assoc)
198cd01204bSPaul Mundt 
199cd01204bSPaul Mundt #define CACHE_DESC_SHAPE(desc)	\
200cd01204bSPaul Mundt 	CSHAPE((desc).way_size * (desc).ways, ilog2((desc).linesz), (desc).ways)
201cd01204bSPaul Mundt 
detect_cache_shape(void)202cd01204bSPaul Mundt static void detect_cache_shape(void)
203cd01204bSPaul Mundt {
204cd01204bSPaul Mundt 	l1d_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.dcache);
205cd01204bSPaul Mundt 
206cd01204bSPaul Mundt 	if (current_cpu_data.dcache.flags & SH_CACHE_COMBINED)
207cd01204bSPaul Mundt 		l1i_cache_shape = l1d_cache_shape;
208cd01204bSPaul Mundt 	else
209cd01204bSPaul Mundt 		l1i_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.icache);
210cd01204bSPaul Mundt 
211cd01204bSPaul Mundt 	if (current_cpu_data.flags & CPU_HAS_L2_CACHE)
212cd01204bSPaul Mundt 		l2_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.scache);
213cd01204bSPaul Mundt 	else
214cd01204bSPaul Mundt 		l2_cache_shape = -1; /* No S-cache */
215cd01204bSPaul Mundt }
216cd01204bSPaul Mundt 
fpu_init(void)2174603f53aSPaul Gortmaker static void fpu_init(void)
2180ea820cfSPaul Mundt {
2190ea820cfSPaul Mundt 	/* Disable the FPU */
2200ea820cfSPaul Mundt 	if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
2210ea820cfSPaul Mundt 		printk("FPU Disabled\n");
2220ea820cfSPaul Mundt 		current_cpu_data.flags &= ~CPU_HAS_FPU;
2230ea820cfSPaul Mundt 	}
2240ea820cfSPaul Mundt 
2250ea820cfSPaul Mundt 	disable_fpu();
2260ea820cfSPaul Mundt 	clear_used_math();
2270ea820cfSPaul Mundt }
2280ea820cfSPaul Mundt 
#ifdef CONFIG_SH_DSP
/* Drop SR.DSP so the DSP context is no longer live. */
static void release_dsp(void)
{
	unsigned long sr;

	/* Clear SR.DSP bit */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"and\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		: "=&r" (sr)
		: "r" (~SR_DSP)
	);
}

/*
 * Probe for a DSP by trying to set SR.DSP and reading it back: on
 * parts without a DSP the bit does not stick.  Also honours the
 * "nodsp" command line option.
 */
static void dsp_init(void)
{
	unsigned long sr;

	/*
	 * Set the SR.DSP bit, wait for one instruction, and then read
	 * back the SR value.
	 */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"or\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		"nop\n\t"
		"stc\tsr, %0\n\t"
		: "=&r" (sr)
		: "r" (SR_DSP)
	);

	/* If the DSP bit is still set, this CPU has a DSP */
	if (sr & SR_DSP)
		current_cpu_data.flags |= CPU_HAS_DSP;

	/* Disable the DSP when "nodsp" was given on the command line. */
	if (dsp_disabled && (current_cpu_data.flags & CPU_HAS_DSP)) {
		printk("DSP Disabled\n");
		current_cpu_data.flags &= ~CPU_HAS_DSP;
	}

	/* Now that we've determined the DSP status, clear the DSP bit. */
	release_dsp();
}
#else
static inline void dsp_init(void) { }
#endif /* CONFIG_SH_DSP */
2781da177e4SLinus Torvalds 
/**
 * cpu_init
 *
 * This is our initial entry point for each CPU, and is invoked on the
 * boot CPU prior to calling start_kernel(). For SMP, a combination of
 * this and start_secondary() will bring up each processor to a ready
 * state prior to hand forking the idle loop.
 *
 * We do all of the basic processor init here, including setting up
 * the caches, FPU, DSP, etc. By the time start_kernel() is hit (and
 * subsequently platform_setup()) things like determining the CPU
 * subtype and initial configuration will all be done.
 *
 * Each processor family is still responsible for doing its own probing
 * and cache configuration in cpu_probe().
 *
 * NOTE: the ordering below is load-bearing — cache geometry must be
 * derived before cache_init(), and the ASID cache must be set before
 * any TLB flushing can happen.
 */
asmlinkage void cpu_init(void)
{
	current_thread_info()->cpu = hard_smp_processor_id();

	/* First, probe the CPU */
	cpu_probe();

	/* Nothing sensible can be done for a subtype we don't recognize. */
	if (current_cpu_data.type == CPU_SH_NONE)
		panic("Unknown CPU");

	/* First setup the rest of the I-cache info */
	current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
				      current_cpu_data.icache.linesz;

	current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
				    current_cpu_data.icache.linesz;

	/* And the D-cache too */
	current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
				      current_cpu_data.dcache.linesz;

	current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
				    current_cpu_data.dcache.linesz;

	/* Init the cache */
	cache_init();

	if (raw_smp_processor_id() == 0) {
#ifdef CONFIG_MMU
		/*
		 * Align shared mappings to the larger of the D-cache way
		 * size and the page size (presumably to sidestep cache
		 * aliasing — confirm against the mm/ users of this mask).
		 */
		shm_align_mask = max_t(unsigned long,
				       current_cpu_data.dcache.way_size - 1,
				       PAGE_SIZE - 1);
#else
		shm_align_mask = PAGE_SIZE - 1;
#endif

		/* Boot CPU sets the cache shape */
		detect_cache_shape();
	}

	fpu_init();
	dsp_init();

	/*
	 * Initialize the per-CPU ASID cache very early, since the
	 * TLB flushing routines depend on this being setup.
	 */
	current_cpu_data.asid_cache = NO_CONTEXT;

	/* Physical address space: legacy 29-bit mode or full 32-bit. */
	current_cpu_data.phys_bits = __in_29bit_mode() ? 29 : 32;

	speculative_execution_init();
	expmask_init();

	/* Do the rest of the boot processor setup */
	if (raw_smp_processor_id() == 0) {
		/* Save off the BIOS VBR, if there is one */
		sh_bios_vbr_init();

		/*
		 * Setup VBR for boot CPU. Secondary CPUs do this through
		 * start_secondary().
		 */
		per_cpu_trap_init();

		/*
		 * Boot processor to setup the FP and extended state
		 * context info.
		 */
		init_thread_xstate();
	}
}
367