// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU is not using the hash
 * table, such as 8xx, 4xx, BookE, etc.
 *
 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                IBM Corp.
 *
 *  Derived from previous arch/powerpc/mm/mmu_context.c
 *  and arch/powerpc/include/asm/mmu_context.h
 *
 * TODO:
 *
 *   - The global context lock will not scale very well
 *   - The maps should be dynamically allocated to allow for processors
 *     that support more PID bits at runtime
 *   - Implement flush_tlb_mm() by making the context stale and picking
 *     a new one
 *   - More aggressively clear stale map bits and maybe find some way to
 *     also clear mm->cpu_vm_mask bits when processes are migrated
 */

//#define DEBUG_HARDER

/*
 * We don't use DEBUG because it tends to be compiled in always nowadays
 * and this would generate way too much output.
 */
#ifdef DEBUG_HARDER
#define pr_hard(args...)	printk(KERN_DEBUG args)
#define pr_hardcont(args...)	printk(KERN_CONT args)
#else
#define pr_hard(args...)	do { } while (0)
#define pr_hardcont(args...)	do { } while (0)
#endif

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/smp.h>

#include <mm/mmu_decl.h>

/*
 * Room for two PTE table pointers: usually the kernel and the current
 * user pointer to their respective root page table (pgdir).
 */
void *abatron_pteptrs[2];

/*
 * The MPC8xx has only 16 contexts. We rotate through them on each task switch.
 * A better way would be to keep track of tasks that own contexts, and implement
 * an LRU usage. That way very active tasks don't always have to pay the TLB
 * reload overhead. The kernel pages are mapped shared, so the kernel can run on
 * behalf of any task that makes a kernel entry. Shared does not mean they are
 * not protected, just that the ASID comparison is not performed. -- Dan
 *
 * The IBM4xx has 256 contexts, so we can just rotate through these as a way of
 * "switching" contexts. If the TID of the TLB is zero, the PID/TID comparison
 * is disabled, so we can use a TID of zero to represent all kernel pages as
 * shared among all contexts. -- Dan
 *
 * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We should
 * normally never have to steal, though the facility is present if needed.
 * -- BenH
 */
#define FIRST_CONTEXT 1
#if defined(CONFIG_PPC_8xx)
#define LAST_CONTEXT 16
#elif defined(CONFIG_PPC_47x)
#define LAST_CONTEXT 65535
#else
#define LAST_CONTEXT 255
#endif

static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
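/*
 * Guards all of the context-management state above (maps, counters,
 * context_mm). It is taken on the task-switch path with interrupts
 * off, hence a raw spinlock: it must remain a busy-wait lock even
 * where spinlocks become sleeping locks (PREEMPT_RT).
 */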
static DEFINE_RAW_SPINLOCK(context_lock);

#define CTX_MAP_SIZE	\
	(sizeof(unsigned long) * (LAST_CONTEXT / BITS_PER_LONG + 1))
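/*
 * Worked example (illustrative, not from the original source): on a
 * 32-bit part with LAST_CONTEXT = 255, CTX_MAP_SIZE is
 * 4 * (255 / 32 + 1) = 32 bytes, i.e. one bit per context rounded up
 * to a whole number of longs.
 */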


/*
 * Steal a context from a task that has one at the moment.
 *
 * This is used when we are running out of available PID numbers
 * on the processors.
 *
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 *
 * For context stealing, we use a slightly different approach for
 * SMP and UP. Basically, the UP one is simpler and doesn't use
 * the stale map as we can just flush the local CPU.
 *  -- benh
 */
static unsigned int steal_context_smp(unsigned int id)
{
	struct mm_struct *mm;
	unsigned int cpu, max, i;

	max = LAST_CONTEXT - FIRST_CONTEXT;

	/* Attempt to free next_context first and then loop until we manage */
	while (max--) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		/*
		 * We have a candidate victim, check if it's active: on SMP
		 * we cannot steal active contexts.
		 */
		if (mm->context.active) {
			id++;
			if (id > LAST_CONTEXT)
				id = FIRST_CONTEXT;
			continue;
		}
		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;

		/*
		 * Mark it stale on all CPUs that used this mm. For threaded
		 * implementations, we set it on all threads on each core
		 * represented in the mask. A future implementation will use
		 * a core map instead but this will do for now.
		 */
		for_each_cpu(cpu, mm_cpumask(mm)) {
			for (i = cpu_first_thread_sibling(cpu);
			     i <= cpu_last_thread_sibling(cpu); i++) {
				if (stale_map[i])
					__set_bit(id, stale_map[i]);
			}
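			/*
			 * We just covered every thread of this core; bump
			 * the iterator past them so for_each_cpu() resumes
			 * at the next core in the mask.
			 */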
			cpu = i - 1;
		}
		return id;
	}

	/*
	 * This will happen if you have more CPUs than available contexts,
	 * all we can do here is wait a bit and try again.
	 */
	raw_spin_unlock(&context_lock);
	cpu_relax();
	raw_spin_lock(&context_lock);

	/* This will cause the caller to try again */
	return MMU_NO_CONTEXT;
}

static unsigned int steal_all_contexts(void)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();
	unsigned int id;

	for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;
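		/*
		 * Keep FIRST_CONTEXT allocated: it is handed straight back
		 * to the caller below, so only release the other ids.
		 */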
		if (id != FIRST_CONTEXT) {
			context_mm[id] = NULL;
			__clear_bit(id, context_map);
		}
		if (IS_ENABLED(CONFIG_SMP))
			__clear_bit(id, stale_map[cpu]);
	}

	/* Flush the TLB for all contexts (not to be used on SMP) */
	_tlbil_all();

	nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT;

	return FIRST_CONTEXT;
}

/*
 * Note that this will also be called on SMP if all other CPUs are
 * offlined, which means that it may be called for cpu != 0. For
 * this to work, we somewhat assume that CPUs that are onlined
 * come up with a fully clean TLB (or are cleaned when offlined).
 */
static unsigned int steal_context_up(unsigned int id)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();

	/* Pick up the victim mm */
	mm = context_mm[id];

	pr_hardcont(" | steal %d from 0x%p", id, mm);

	/* Flush the TLB for that context */
	local_flush_tlb_mm(mm);

	/* Mark this mm as having no context anymore */
	mm->context.id = MMU_NO_CONTEXT;

	/* XXX This clear should ultimately be part of local_flush_tlb_mm */
	if (IS_ENABLED(CONFIG_SMP))
		__clear_bit(id, stale_map[cpu]);

	return id;
}

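/*
 * Program the hardware with the new context: on 8xx this means pointing
 * M_TWB at the new pgd and loading the CASID; on the other non-hash
 * parts it is a simple PID update.
 */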
static void set_context(unsigned long id, pgd_t *pgd)
{
	if (IS_ENABLED(CONFIG_PPC_8xx)) {
		s16 offset = (s16)(__pa(swapper_pg_dir));

		/*
		 * Register M_TWB will contain the base address of the level 1
		 * table minus the lower part of the kernel PGDIR base address,
		 * so that all accesses to the level 1 table are done relative
		 * to the lower part of the kernel PGDIR base address.
		 */
		mtspr(SPRN_M_TWB, __pa(pgd) - offset);

		/* Update context: hardware CASIDs are 0..15 while ids here run 1..16 */
		mtspr(SPRN_M_CASID, id - 1);

		/* sync */
		mb();
	} else {
		if (IS_ENABLED(CONFIG_40x))
			mb();	/* sync */

		mtspr(SPRN_PID, id);
		isync();
	}
}

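/*
 * Called on every task switch: find (or steal) a context id for 'next',
 * flush it locally if it was marked stale on this CPU, then point the
 * MMU at it.
 */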
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned int id;
	unsigned int i, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path ... yet */
	raw_spin_lock(&context_lock);

	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		cpu, next, next->context.active, next->context.id);

	if (IS_ENABLED(CONFIG_SMP)) {
		/* Mark us active and the previous one not anymore */
		next->context.active++;
		if (prev) {
			pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
			WARN_ON(prev->context.active < 1);
			prev->context.active--;
		}
	}

 again:

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT))
		goto ctxt_ok;

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > LAST_CONTEXT)
		id = FIRST_CONTEXT;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
		if (IS_ENABLED(CONFIG_PPC_8xx))
			id = steal_all_contexts();
		else
			id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, LAST_CONTEXT + 1, id);
		if (id > LAST_CONTEXT)
			id = FIRST_CONTEXT;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;
	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

 ctxt_ok:

	/*
	 * If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it.
	 */
	if (IS_ENABLED(CONFIG_SMP) && test_bit(id, stale_map[cpu])) {
		pr_hardcont(" | stale flush %d [%d..%d]",
			    id, cpu_first_thread_sibling(cpu),
			    cpu_last_thread_sibling(cpu));

		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		for (i = cpu_first_thread_sibling(cpu);
		     i <= cpu_last_thread_sibling(cpu); i++) {
			if (stale_map[i])
				__clear_bit(id, stale_map[i]);
		}
	}

	/* Flick the MMU and release lock */
	pr_hardcont(" -> %d\n", id);
	if (IS_ENABLED(CONFIG_BDI_SWITCH))
		abatron_pteptrs[1] = next->pgd;
	set_context(id, next->pgd);
	raw_spin_unlock(&context_lock);
}

/*
 * Set up the context for a new address space.
 */
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
	pr_hard("initing context for mm @%p\n", mm);

	/*
	 * We have MMU_NO_CONTEXT set to be ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mm's (which will
	 * have id == 0) and don't alter context slice inherited via fork (which
	 * will have id != 0).
	 */
	if (mm->context.id == 0)
		slice_init_new_context_exec(mm);
	mm->context.id = MMU_NO_CONTEXT;
	mm->context.active = 0;
	pte_frag_set(&mm->context, NULL);
	return 0;
}

/*
 * We're finished using the context for an address space.
 */
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int id;

	if (mm->context.id == MMU_NO_CONTEXT)
		return;

	WARN_ON(mm->context.active != 0);

	raw_spin_lock_irqsave(&context_lock, flags);
	id = mm->context.id;
	if (id != MMU_NO_CONTEXT) {
		__clear_bit(id, context_map);
		mm->context.id = MMU_NO_CONTEXT;
		context_mm[id] = NULL;
		nr_free_contexts++;
	}
	raw_spin_unlock_irqrestore(&context_lock, flags);
}

static int mmu_ctx_cpu_prepare(unsigned int cpu)
{
	/*
	 * We don't touch the boot CPU's map, it's allocated at boot and
	 * kept around forever.
	 */
	if (cpu == boot_cpuid)
		return 0;

	pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
	stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
	return 0;
}

static int mmu_ctx_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (cpu == boot_cpuid)
		return 0;

	pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
	kfree(stale_map[cpu]);
	stale_map[cpu] = NULL;

	/* We also clear the cpu_vm_mask bits of CPUs going away */
	clear_tasks_mm_cpumask(cpu);
#endif
	return 0;
}

/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/*
	 * Mark init_mm as being active on all possible CPUs since
	 * we'll get called with prev == init_mm the first time
	 * we schedule on a given CPU.
	 */
	init_mm.context.active = NR_CPUS;

	/*
	 * Allocate the maps used by context management
	 */
	context_map = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
	if (!context_map)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      CTX_MAP_SIZE);
	context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1),
				    SMP_CACHE_BYTES);
	if (!context_mm)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(void *) * (LAST_CONTEXT + 1));
	if (IS_ENABLED(CONFIG_SMP)) {
		stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
		if (!stale_map[boot_cpuid])
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      CTX_MAP_SIZE);

		cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
					  "powerpc/mmu/ctx:prepare",
					  mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead);
	}

	printk(KERN_INFO
	       "MMU: Allocated %zu bytes of context maps for %d contexts\n",
	       2 * CTX_MAP_SIZE + (sizeof(void *) * (LAST_CONTEXT + 1)),
	       LAST_CONTEXT - FIRST_CONTEXT + 1);

	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
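	/*
	 * Worked example (illustrative): with FIRST_CONTEXT = 1 this is
	 * (1 << 1) - 1 = 0x1, i.e. only context 0 is marked permanently
	 * taken in the bitmap.
	 */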
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_context = FIRST_CONTEXT;
	nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT + 1;
}