/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * MM context support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H

#include <linux/mm_types.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/mem-layout.h>

/*
 * The VM port hides all TLB management, so "lazy TLB" isn't very
 * meaningful.  Even for ports to architectures with visible TLBs,
 * this is almost invariably a null function.
 *
 * mm->context is set up by pgd_alloc, so no init_new_context is required.
 */

/*
 *  Switch active mm context
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
				struct task_struct *tsk)
{
	int l1;

	/*
	 * For the virtual machine, we have to update the system map if it
	 * has been touched.
	 */
	if (next->context.generation < prev->context.generation) {
		for (l1 = MIN_KERNEL_SEG; l1 <= max_kernel_seg; l1++)
			next->pgd[l1] = init_mm.pgd[l1];

		next->context.generation = prev->context.generation;
	}

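	/* Point the virtual machine at next's page table base ("newmap" VM call) */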
	__vmnewmap((void *)next->context.ptbase);
}

/*
 *  Activate new memory map for task
 */
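/*  Tell <asm-generic/mmu_context.h> that this arch supplies its own activate_mm()  */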
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

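	/*
	 * switch_mm() is normally entered from the scheduler with interrupts
	 * already disabled; activate_mm() may run with them enabled, so
	 * disable them around the map switch.
	 */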
	local_irq_save(flags);
	switch_mm(prev, next, current_thread_info()->task);
	local_irq_restore(flags);
}

/*  Generic hooks for arch_dup_mmap and arch_exit_mmap  */
#include <asm-generic/mm_hooks.h>

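/*  Generic no-op defaults (init_new_context, destroy_context, enter_lazy_tlb, ...)  */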
#include <asm-generic/mmu_context.h>

#endif