xref: /openbmc/linux/arch/powerpc/kernel/paca.c (revision 7e3a68be)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2001 PPC 64 Team, IBM Corp
 */

#include <linux/smp.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>
#include <linux/numa.h>
#include <linux/pgtable.h>

#include <asm/lppaca.h>
#include <asm/paca.h>
#include <asm/sections.h>
#include <asm/kexec.h>
#include <asm/svm.h>
#include <asm/ultravisor.h>

#include "setup.h"

#ifndef CONFIG_SMP
#define boot_cpuid 0
#endif

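/*
 * Allocate backing memory for per-CPU data (paca, lppaca, SLB shadow)
 * from memblock, below the given limit and, when the NUMA topology is
 * already known, on the node of the owning CPU.
 */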
static void *__init alloc_paca_data(unsigned long size, unsigned long align,
				unsigned long limit, int cpu)
{
	void *ptr;
	int nid;

	/*
	 * boot_cpuid paca is allocated very early before cpu_to_node is up.
	 * Set bottom-up mode, because the boot CPU should be on node-0,
	 * which will put its paca in the right place.
	 */
	if (cpu == boot_cpuid) {
		nid = NUMA_NO_NODE;
		memblock_set_bottom_up(true);
	} else {
		nid = early_cpu_to_node(cpu);
	}

	ptr = memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				     limit, nid);
	if (!ptr)
		panic("cannot allocate paca data");

	if (cpu == boot_cpuid)
		memblock_set_bottom_up(false);

	return ptr;
}

#ifdef CONFIG_PPC_PSERIES

#define LPPACA_SIZE 0x400

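/*
 * Secure (SVM) guests must place their lppacas in memory that is shared
 * with the ultravisor/hypervisor, so all of them are carved out of one
 * page-aligned block that is shared once via uv_share_page() and then
 * handed out piecemeal.
 */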
static void *__init alloc_shared_lppaca(unsigned long size, unsigned long limit,
					int cpu)
{
	size_t shared_lppaca_total_size = PAGE_ALIGN(nr_cpu_ids * LPPACA_SIZE);
	static unsigned long shared_lppaca_size;
	static void *shared_lppaca;
	void *ptr;

	if (!shared_lppaca) {
		memblock_set_bottom_up(true);

		/*
		 * See Documentation/powerpc/ultravisor.rst for more details.
		 *
		 * UV/HV data sharing is in PAGE_SIZE granularity. In order to
		 * minimize the number of pages shared, align the allocation to
		 * PAGE_SIZE.
		 */
		shared_lppaca =
			memblock_alloc_try_nid(shared_lppaca_total_size,
					       PAGE_SIZE, MEMBLOCK_LOW_LIMIT,
					       limit, NUMA_NO_NODE);
		if (!shared_lppaca)
			panic("cannot allocate shared data");

		memblock_set_bottom_up(false);
		uv_share_page(PHYS_PFN(__pa(shared_lppaca)),
			      shared_lppaca_total_size >> PAGE_SHIFT);
	}

	ptr = shared_lppaca + shared_lppaca_size;
	shared_lppaca_size += size;

	/*
	 * This is very early in boot, so no harm done if the kernel crashes at
	 * this point.
	 */
	BUG_ON(shared_lppaca_size > shared_lppaca_total_size);

	return ptr;
}

/*
 * See asm/lppaca.h for more detail.
 *
 * lppaca structures must be 1kB in size, L1 cache line aligned,
 * and not cross a 4kB boundary. A 1kB size and 1kB alignment will satisfy
 * these requirements.
 */
static inline void init_lppaca(struct lppaca *lppaca)
{
	BUILD_BUG_ON(sizeof(struct lppaca) != 640);

	*lppaca = (struct lppaca) {
		.desc = cpu_to_be32(0xd397d781),	/* "LpPa" */
		.size = cpu_to_be16(LPPACA_SIZE),
		.fpregs_in_use = 1,
		.slb_count = cpu_to_be16(64),
		.vmxregs_in_use = 0,
		.page_ins = 0, };
}

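/*
 * Allocate and initialise the lppaca for one CPU. Bare-metal (HV mode)
 * kernels do not use lppacas, so return NULL there.
 */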
static struct lppaca * __init new_lppaca(int cpu, unsigned long limit)
{
	struct lppaca *lp;

	BUILD_BUG_ON(sizeof(struct lppaca) > LPPACA_SIZE);

	if (early_cpu_has_feature(CPU_FTR_HVMODE))
		return NULL;

	if (is_secure_guest())
		lp = alloc_shared_lppaca(LPPACA_SIZE, limit, cpu);
	else
		lp = alloc_paca_data(LPPACA_SIZE, 0x400, limit, cpu);

	init_lppaca(lp);

	return lp;
}
#endif /* CONFIG_PPC_PSERIES */

#ifdef CONFIG_PPC_64S_HASH_MMU
/*
 * 3 persistent SLBs are allocated here.  The buffer will be zero
 * initially, hence will all be invalid until we actually write them.
 *
 * If you make the number of persistent SLB entries dynamic, please also
 * update PR KVM to flush and restore them accordingly.
 */
static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
{
	struct slb_shadow *s;

	if (cpu != boot_cpuid) {
		/*
		 * Boot CPU comes here before early_radix_enabled
		 * is parsed (e.g., for disable_radix). So allocate
		 * always and this will be fixed up in free_unused_pacas.
		 */
		if (early_radix_enabled())
			return NULL;
	}

	s = alloc_paca_data(sizeof(*s), L1_CACHE_BYTES, limit, cpu);

	s->persistent = cpu_to_be32(SLB_NUM_BOLTED);
	s->buffer_length = cpu_to_be32(sizeof(*s));

	return s;
}
#endif /* CONFIG_PPC_64S_HASH_MMU */

/* The Paca is an array with one entry per processor.  Each contains an
 * lppaca, which contains the information shared between the
 * hypervisor and Linux.
 * On systems with hardware multi-threading, there are two threads
 * per processor.  The Paca array must contain an entry for each thread.
 * The VPD Areas will give a max logical processors = 2 * max physical
 * processors.  The processor VPD array needs one entry per physical
 * processor (not thread).
 */
struct paca_struct **paca_ptrs __read_mostly;
EXPORT_SYMBOL(paca_ptrs);

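/*
 * Initialise a freshly allocated paca with boot-time defaults. Pointers
 * to the optional lppaca and SLB shadow areas are filled in separately
 * (see allocate_paca()).
 */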
void __init initialise_paca(struct paca_struct *new_paca, int cpu)
{
#ifdef CONFIG_PPC_PSERIES
	new_paca->lppaca_ptr = NULL;
#endif
#ifdef CONFIG_PPC_BOOK3E_64
	new_paca->kernel_pgd = swapper_pg_dir;
#endif
	new_paca->lock_token = 0x8000;
	new_paca->paca_index = cpu;
#ifndef CONFIG_PPC_KERNEL_PCREL
	new_paca->kernel_toc = kernel_toc_addr();
#endif
	new_paca->kernelbase = (unsigned long) _stext;
	/* Only set MSR:IR/DR when MMU is initialized */
	new_paca->kernel_msr = MSR_KERNEL & ~(MSR_IR | MSR_DR);
	new_paca->hw_cpu_id = 0xffff;
	new_paca->kexec_state = KEXEC_STATE_NONE;
	new_paca->__current = &init_task;
	new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
#ifdef CONFIG_PPC_64S_HASH_MMU
	new_paca->slb_shadow_ptr = NULL;
#endif

#ifdef CONFIG_PPC_BOOK3E_64
	/* For now -- if we have threads this will be adjusted later */
	new_paca->tcd_ptr = &new_paca->tcd;
#endif
}

/* Put the paca pointer into r13 and SPRG_PACA */
void setup_paca(struct paca_struct *new_paca)
{
	/* Setup r13 */
	local_paca = new_paca;

#ifdef CONFIG_PPC_BOOK3E_64
	/* On Book3E, initialize the TLB miss exception frames */
	mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
#else
	/*
	 * In HV mode, we set up both HPACA and PACA to avoid problems
	 * if we do a GET_PACA() before the feature fixups have been
	 * applied.
	 *
	 * Normally you should test against CPU_FTR_HVMODE, but CPU features
	 * are not yet set up when we first reach here.
	 */
	if (mfmsr() & MSR_HV)
		mtspr(SPRN_SPRG_HPACA, local_paca);
#endif
	mtspr(SPRN_SPRG_PACA, local_paca);
}

static int __initdata paca_nr_cpu_ids;
static int __initdata paca_ptrs_size;
static int __initdata paca_struct_size;

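/*
 * Allocate the paca pointer array, sized for the maximum possible number
 * of CPUs; the unused tail is returned to memblock later by
 * free_unused_pacas(). The array is filled with a poison pattern so that
 * use of an unpopulated entry fails noisily.
 */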
void __init allocate_paca_ptrs(void)
{
	paca_nr_cpu_ids = nr_cpu_ids;

	paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
	paca_ptrs = memblock_alloc_raw(paca_ptrs_size, SMP_CACHE_BYTES);
	if (!paca_ptrs)
		panic("Failed to allocate %d bytes for paca pointers\n",
		      paca_ptrs_size);

	memset(paca_ptrs, 0x88, paca_ptrs_size);
}

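/*
 * Allocate and initialise the paca for one CPU, keeping it within the
 * region that is addressable in real mode (and covered by a bolted SLB
 * entry on hash MMU systems).
 */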
void __init allocate_paca(int cpu)
{
	u64 limit;
	struct paca_struct *paca;

	BUG_ON(cpu >= paca_nr_cpu_ids);

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * We access pacas in real mode, and cannot take SLB faults
	 * on them when in virtual mode, so allocate them accordingly.
	 */
	limit = min(ppc64_bolted_size(), ppc64_rma_size);
#else
	limit = ppc64_rma_size;
#endif

	paca = alloc_paca_data(sizeof(struct paca_struct), L1_CACHE_BYTES,
				limit, cpu);
	paca_ptrs[cpu] = paca;

	initialise_paca(paca, cpu);
#ifdef CONFIG_PPC_PSERIES
	paca->lppaca_ptr = new_lppaca(cpu, limit);
#endif
#ifdef CONFIG_PPC_64S_HASH_MMU
	paca->slb_shadow_ptr = new_slb_shadow(cpu, limit);
#endif
	paca_struct_size += sizeof(struct paca_struct);
}

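/*
 * Called once the final number of possible CPUs is known: trim the unused
 * tail of the paca pointer array and, if the boot CPU ended up using the
 * radix MMU, free its speculatively allocated SLB shadow.
 */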
void __init free_unused_pacas(void)
{
	int new_ptrs_size;

	new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
	if (new_ptrs_size < paca_ptrs_size)
		memblock_phys_free(__pa(paca_ptrs) + new_ptrs_size,
				   paca_ptrs_size - new_ptrs_size);

	paca_nr_cpu_ids = nr_cpu_ids;
	paca_ptrs_size = new_ptrs_size;

#ifdef CONFIG_PPC_64S_HASH_MMU
	if (early_radix_enabled()) {
		/* Ugly fixup, see new_slb_shadow() */
		memblock_phys_free(__pa(paca_ptrs[boot_cpuid]->slb_shadow_ptr),
				   sizeof(struct slb_shadow));
		paca_ptrs[boot_cpuid]->slb_shadow_ptr = NULL;
	}
#endif

	printk(KERN_DEBUG "Allocated %u bytes for %u pacas\n",
			paca_ptrs_size + paca_struct_size, nr_cpu_ids);
}

#ifdef CONFIG_PPC_64S_HASH_MMU
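/*
 * Cache the mm's slice page-size arrays in this CPU's paca for use by the
 * low-level hash MMU/SLB code.
 */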
void copy_mm_to_paca(struct mm_struct *mm)
{
	mm_context_t *context = &mm->context;

	VM_BUG_ON(!mm_ctx_slb_addr_limit(context));
	memcpy(&get_paca()->mm_ctx_low_slices_psize, mm_ctx_low_slices(context),
	       LOW_SLICE_ARRAY_SZ);
	memcpy(&get_paca()->mm_ctx_high_slices_psize, mm_ctx_high_slices(context),
	       TASK_SLICE_ARRAY_SZ(context));
}
#endif /* CONFIG_PPC_64S_HASH_MMU */