/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#ifndef __UM_MMU_CONTEXT_H
#define __UM_MMU_CONTEXT_H

#include <linux/sched.h>
#include <linux/mm_types.h>

#include <asm/mmu.h>

/* Maps UML's syscall stub pages into @mm. */
extern void uml_setup_stubs(struct mm_struct *mm);

/*
 * Needed since we do not use asm-generic/mm_hooks.h:
 */
static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	uml_setup_stubs(mm);
	return 0;
}

extern void arch_exit_mmap(struct mm_struct *mm);

static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
					     bool write, bool execute,
					     bool foreign)
{
	/* by default, allow everything */
	return true;
}

/*
 * end of asm-generic/mm_hooks.h functions
 */

/* Nothing to do on UML when an mm is deactivated. */
#define deactivate_mm(tsk,mm)	do { } while (0)

/* Flush all of current->mm's mappings through to the host
 * (see arch/um/kernel/tlb.c). */
extern void force_flush_all(void);

static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
{
	/*
	 * This is called by fs/exec.c and sys_unshare()
	 * when the new ->mm is used for the first time.
	 */
	__switch_mm(&new->context.id);
	down_write(&new->mmap_sem);
	uml_setup_stubs(new);
	up_write(&new->mmap_sem);
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();

	if (prev != next) {
		cpumask_clear_cpu(cpu, mm_cpumask(prev));
		cpumask_set_cpu(cpu, mm_cpumask(next));
		/*
		 * init_mm has no userspace on the host side, so there is
		 * no host address space to switch to for it.
		 */
		if (next != &init_mm)
			__switch_mm(&next->context.id);
	}
}

static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
}

/* Set up and tear down the host address-space state in mm->context. */
extern int init_new_context(struct task_struct *task, struct mm_struct *mm);

extern void destroy_context(struct mm_struct *mm);

#endif
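
/*
 * Usage sketch (illustrative only; not part of this header): the core
 * scheduler is the main caller of switch_mm() and enter_lazy_tlb().
 * In kernels of this vintage, context_switch() in kernel/sched/core.c
 * does, in outline, something like the following, where @prev and
 * @next are the outgoing and incoming tasks:
 *
 *	struct mm_struct *mm = next->mm;
 *	struct mm_struct *oldmm = prev->active_mm;
 *
 *	if (!mm)			// kernel thread: keep borrowing oldmm
 *		enter_lazy_tlb(oldmm, next);
 *	else				// user task: switch host address space
 *		switch_mm(oldmm, mm, next);
 *
 * activate_mm() is reached instead via exec_mmap() on execve() and via
 * sys_unshare(), when a task starts using a brand-new mm.  The exact
 * call sites have changed across kernel versions, so treat this as a
 * sketch of the contract, not a copy of the scheduler code.
 */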