/*
 *  arch/arm/include/asm/mmu_context.h
 *
 *  Copyright (C) 1996 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Changelog:
 *   27-06-1996	RMK	Created
 */
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>

void __check_vmalloc_seq(struct mm_struct *mm);

#ifdef CONFIG_CPU_HAS_ASID

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
#define init_new_context(tsk,mm)	({ mm->context.id = 0; })

#else	/* !CONFIG_CPU_HAS_ASID */

#ifdef CONFIG_MMU

static inline void check_and_switch_context(struct mm_struct *mm,
					    struct task_struct *tsk)
{
	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	if (irqs_disabled())
		/*
		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
		 * high interrupt latencies, defer the call and continue
		 * running with the old mm. Since we only support UP systems
		 * on non-ASID CPUs, the old mm will remain valid until the
		 * finish_arch_post_lock_switch() call.
		 */
		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
	else
		cpu_switch_mm(mm->pgd, mm);
}

#define finish_arch_post_lock_switch \
	finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
		struct mm_struct *mm = current->mm;
		cpu_switch_mm(mm->pgd, mm);
	}
}

#endif	/* CONFIG_MMU */

#define init_new_context(tsk,mm)	0

#endif	/* CONFIG_CPU_HAS_ASID */

#define destroy_context(mm)		do { } while(0)
#define activate_mm(prev,next)		switch_mm(prev, next, NULL)

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 * cpu: cpu number which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
#ifdef CONFIG_MMU
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_SMP
	/* check for possible thread migration */
	if (!cpumask_empty(mm_cpumask(next)) &&
	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
		__flush_icache_all();
#endif
	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
		check_and_switch_context(next, tsk);
		if (cache_is_vivt())
			cpumask_clear_cpu(cpu, mm_cpumask(prev));
	}
#endif
}

#define deactivate_mm(tsk,mm)	do { } while (0)

#endif
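
/*
 * Editorial sketch, not part of the original header: assuming a non-ASID,
 * VIVT, UP configuration (CONFIG_CPU_HAS_ASID=n, CONFIG_SMP=n), the deferred
 * switch described in check_and_switch_context() plays out roughly as below.
 * The scheduler-side call sites are paraphrased for illustration only.
 *
 *	switch_mm(prev, next, tsk);		called by the scheduler with
 *						IRQs disabled
 *	  -> check_and_switch_context(next, tsk);
 *		irqs_disabled() is true, so TIF_SWITCH_MM is set and
 *		cpu_switch_mm() is not called yet; we keep running on the
 *		old page tables, which remain valid on UP
 *
 *	... the task switch completes and IRQs are re-enabled ...
 *
 *	finish_arch_post_lock_switch();
 *	  -> test_and_clear_thread_flag(TIF_SWITCH_MM) is true, so the
 *	     deferred cpu_switch_mm(current->mm->pgd, current->mm) runs
 *	     here, where the VIVT cache flush no longer adds to
 *	     interrupt latency
 */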