// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU is not using the hash
 * table, such as 8xx, 4xx, BookE's etc...
 *
 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                IBM Corp.
 *
 *  Derived from previous arch/powerpc/mm/mmu_context.c
 *  and arch/powerpc/include/asm/mmu_context.h
 *
 * TODO:
 *
 *   - The global context lock will not scale very well
 *   - The maps should be dynamically allocated to allow for processors
 *     that support more PID bits at runtime
 *   - Implement flush_tlb_mm() by making the context stale and picking
 *     a new one
 *   - More aggressively clear stale map bits and maybe find some way to
 *     also clear mm->cpu_vm_mask bits when processes are migrated
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/smp.h>
#include <asm/kup.h>

#include <mm/mmu_decl.h>

/*
 * Room for two PTE table pointers, usually the kernel and current user
 * pointers to their respective root page table (pgdir).
 */
void *abatron_pteptrs[2];

/*
 * The MPC8xx has only 16 contexts. We rotate through them on each task switch.
 * A better way would be to keep track of tasks that own contexts, and implement
 * LRU usage. That way very active tasks don't always have to pay the TLB
 * reload overhead. The kernel pages are mapped shared, so the kernel can run on
 * behalf of any task that makes a kernel entry. Shared does not mean they are
 * not protected, just that the ASID comparison is not performed. -- Dan
 *
 * The IBM4xx has 256 contexts, so we can just rotate through these as a way of
 * "switching" contexts. If the TID of the TLB is zero, the PID/TID comparison
 * is disabled, so we can use a TID of zero to represent all kernel pages as
 * shared among all contexts. -- Dan
 *
 * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We should
 * normally never have to steal, though the facility is present if needed.
 * -- BenH
 */
#define FIRST_CONTEXT 1
#if defined(CONFIG_PPC_8xx)
#define LAST_CONTEXT 16
#elif defined(CONFIG_PPC_47x)
#define LAST_CONTEXT 65535
#else
#define LAST_CONTEXT 255
#endif

static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
static DEFINE_RAW_SPINLOCK(context_lock);

#define CTX_MAP_SIZE	\
	(sizeof(unsigned long) * (LAST_CONTEXT / BITS_PER_LONG + 1))

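/*
 * Illustrative sizing only (assuming a 32-bit build, BITS_PER_LONG == 32):
 * with LAST_CONTEXT == 16 (8xx) the map fits in a single long (4 bytes),
 * with LAST_CONTEXT == 255 it takes 8 longs (32 bytes), and with
 * LAST_CONTEXT == 65535 (47x) it takes 2048 longs (8 KiB).
 */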

/* Steal a context from a task that has one at the moment.
 *
 * This is used when we are running out of available PID numbers
 * on the processors.
 *
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 *
 * For context stealing, we use a slightly different approach for
 * SMP and UP. Basically, the UP one is simpler and doesn't use
 * the stale map as we can just flush the local CPU
 *  -- benh
 */
static unsigned int steal_context_smp(unsigned int id)
{
	struct mm_struct *mm;
	unsigned int cpu, max, i;

	max = LAST_CONTEXT - FIRST_CONTEXT;

	/* Attempt to free next_context first and then loop until we manage */
	while (max--) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		/* We have a candidate victim, check if it's active, on SMP
		 * we cannot steal active contexts
		 */
		if (mm->context.active) {
			id++;
			if (id > LAST_CONTEXT)
				id = FIRST_CONTEXT;
			continue;
		}

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;

		/* Mark it stale on all CPUs that used this mm. For threaded
		 * implementations, we set it on all threads on each core
		 * represented in the mask. A future implementation will use
		 * a core map instead but this will do for now.
		 */
		for_each_cpu(cpu, mm_cpumask(mm)) {
			for (i = cpu_first_thread_sibling(cpu);
			     i <= cpu_last_thread_sibling(cpu); i++) {
				if (stale_map[i])
					__set_bit(id, stale_map[i]);
			}
			cpu = i - 1;
		}
		return id;
	}

	/* This will happen if you have more CPUs than available contexts,
	 * all we can do here is wait a bit and try again
	 */
	raw_spin_unlock(&context_lock);
	cpu_relax();
	raw_spin_lock(&context_lock);

	/* This will cause the caller to try again */
	return MMU_NO_CONTEXT;
}

static unsigned int steal_all_contexts(void)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();
	unsigned int id;

	for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;
		if (id != FIRST_CONTEXT) {
			context_mm[id] = NULL;
			__clear_bit(id, context_map);
		}
		if (IS_ENABLED(CONFIG_SMP))
			__clear_bit(id, stale_map[cpu]);
	}

	/* Flush the TLB for all contexts (not to be used on SMP) */
	_tlbil_all();

	nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT;

	return FIRST_CONTEXT;
}

/* Note that this will also be called on SMP if all other CPUs are
 * offlined, which means that it may be called for cpu != 0. For
 * this to work, we somewhat assume that CPUs that are onlined
 * come up with a fully clean TLB (or are cleaned when offlined)
 */
static unsigned int steal_context_up(unsigned int id)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();

	/* Pick up the victim mm */
	mm = context_mm[id];

	/* Flush the TLB for that context */
	local_flush_tlb_mm(mm);

	/* Mark this mm as having no context anymore */
	mm->context.id = MMU_NO_CONTEXT;

	/* XXX This clear should ultimately be part of local_flush_tlb_mm */
	if (IS_ENABLED(CONFIG_SMP))
		__clear_bit(id, stale_map[cpu]);

	return id;
}

static void set_context(unsigned long id, pgd_t *pgd)
{
	if (IS_ENABLED(CONFIG_PPC_8xx)) {
		s16 offset = (s16)(__pa(swapper_pg_dir));

		/*
		 * Register M_TWB will contain base address of level 1 table minus the
		 * lower part of the kernel PGDIR base address, so that all accesses to
		 * level 1 table are done relative to lower part of kernel PGDIR base
		 * address.
		 */
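		/*
		 * Worked example (made-up addresses, for illustration only):
		 * if __pa(swapper_pg_dir) were 0x00c04000, 'offset' would be
		 * (s16)0x4000 and M_TWB would be written with
		 * __pa(pgd) - 0x4000, i.e. this mm's level 1 table rebased
		 * against the low 16 bits of the kernel PGDIR address.
		 */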
		mtspr(SPRN_M_TWB, __pa(pgd) - offset);

		/* Update context */
		mtspr(SPRN_M_CASID, id - 1);

		/* sync */
		mb();
	} else if (kuap_is_disabled()) {
		if (IS_ENABLED(CONFIG_40x))
			mb();	/* sync */

		mtspr(SPRN_PID, id);
		isync();
	}
}

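/*
 * Rough sketch of how the function below is reached (illustrative only,
 * assuming the usual powerpc switch_mm() wiring):
 *
 *	switch_mm(prev, next, tsk)
 *	  -> switch_mmu_context(prev, next, tsk)
 *	       - pick (or steal) a context id for 'next' if it has none
 *	       - flush the local TLB if that id was marked stale on this CPU
 *	       - program the MMU with the new id and pgdir via set_context()
 */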
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned int id;
	unsigned int i, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path .. yet */
	raw_spin_lock(&context_lock);

	if (IS_ENABLED(CONFIG_SMP)) {
		/* Mark us active and the previous one not anymore */
		next->context.active++;
		if (prev) {
			WARN_ON(prev->context.active < 1);
			prev->context.active--;
		}
	}

 again:

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT))
		goto ctxt_ok;

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > LAST_CONTEXT)
		id = FIRST_CONTEXT;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
		if (IS_ENABLED(CONFIG_PPC_8xx))
			id = steal_all_contexts();
		else
			id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, LAST_CONTEXT+1, id);
		if (id > LAST_CONTEXT)
			id = FIRST_CONTEXT;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;

 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (IS_ENABLED(CONFIG_SMP) && test_bit(id, stale_map[cpu])) {
		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		for (i = cpu_first_thread_sibling(cpu);
		     i <= cpu_last_thread_sibling(cpu); i++) {
			if (stale_map[i])
				__clear_bit(id, stale_map[i]);
		}
	}

	/* Flick the MMU and release lock */
	if (IS_ENABLED(CONFIG_BDI_SWITCH))
		abatron_pteptrs[1] = next->pgd;
	set_context(id, next->pgd);
#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
	tsk->thread.pid = id;
#endif
	raw_spin_unlock(&context_lock);
}

/*
 * Set up the context for a new address space.
 */
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
	mm->context.id = MMU_NO_CONTEXT;
	mm->context.active = 0;
	pte_frag_set(&mm->context, NULL);
	return 0;
}

/*
 * We're finished using the context for an address space.
 */
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int id;

	if (mm->context.id == MMU_NO_CONTEXT)
		return;

	WARN_ON(mm->context.active != 0);

	raw_spin_lock_irqsave(&context_lock, flags);
	id = mm->context.id;
	if (id != MMU_NO_CONTEXT) {
		__clear_bit(id, context_map);
		mm->context.id = MMU_NO_CONTEXT;
		context_mm[id] = NULL;
		nr_free_contexts++;
	}
	raw_spin_unlock_irqrestore(&context_lock, flags);
}

static int mmu_ctx_cpu_prepare(unsigned int cpu)
{
	/* We don't touch the boot CPU's map, it's allocated at boot and kept
	 * around forever
	 */
	if (cpu == boot_cpuid)
		return 0;

	stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
	return 0;
}

static int mmu_ctx_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (cpu == boot_cpuid)
		return 0;

	kfree(stale_map[cpu]);
	stale_map[cpu] = NULL;

	/* We also clear the cpu_vm_mask bits of CPUs going away */
	clear_tasks_mm_cpumask(cpu);
#endif
	return 0;
}

/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/* Mark init_mm as being active on all possible CPUs since
	 * we'll get called with prev == init_mm the first time
	 * we schedule on a given CPU
	 */
	init_mm.context.active = NR_CPUS;

	/*
	 * Allocate the maps used by context management
	 */
	context_map = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
	if (!context_map)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      CTX_MAP_SIZE);
	context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1),
				    SMP_CACHE_BYTES);
	if (!context_mm)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(void *) * (LAST_CONTEXT + 1));
	if (IS_ENABLED(CONFIG_SMP)) {
		stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
		if (!stale_map[boot_cpuid])
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      CTX_MAP_SIZE);

		cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
					  "powerpc/mmu/ctx:prepare",
					  mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead);
	}

	printk(KERN_INFO
	       "MMU: Allocated %zu bytes of context maps for %d contexts\n",
	       2 * CTX_MAP_SIZE + (sizeof(void *) * (LAST_CONTEXT + 1)),
	       LAST_CONTEXT - FIRST_CONTEXT + 1);

	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
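	/* Worked example: with FIRST_CONTEXT == 1 the line below sets only
	 * bit 0, marking context 0 as used and leaving contexts
	 * 1..LAST_CONTEXT free.
	 */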
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_context = FIRST_CONTEXT;
	nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT + 1;
}