/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _ASM_RISCV_MMU_CONTEXT_H
#define _ASM_RISCV_MMU_CONTEXT_H

#include <linux/mm_types.h>
#include <asm-generic/mm_hooks.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

static inline void enter_lazy_tlb(struct mm_struct *mm,
	struct task_struct *task)
{
}

/* Initialize context-related info for a new mm_struct */
static inline int init_new_context(struct task_struct *task,
	struct mm_struct *mm)
{
	return 0;
}

static inline void destroy_context(struct mm_struct *mm)
{
}

/*
 * Read the root page table pointer back out of the sptbr CSR (renamed satp
 * in v1.10 of the privileged spec).
 */
static inline pgd_t *current_pgdir(void)
{
	return pfn_to_virt(csr_read(sptbr) & SPTBR_PPN);
}

static inline void set_pgdir(pgd_t *pgd)
{
	csr_write(sptbr, virt_to_pfn(pgd) | SPTBR_MODE);
}

/*
 * When necessary, performs a deferred icache flush for the given MM context,
 * on the local CPU.  RISC-V has no direct mechanism for instruction cache
 * shootdowns, so instead we send an IPI that informs the remote harts they
 * need to flush their local instruction caches.  To avoid pathologically slow
 * behavior in a common case (a bunch of single-hart processes on a many-hart
 * machine, i.e. 'make -j') we avoid the IPIs for harts that are not currently
 * executing an MM context and instead schedule a deferred local instruction
 * cache flush to be performed before execution resumes on each hart.  This
 * function actually performs that local instruction cache flush, which
 * implicitly only refers to the current hart.
 */
static inline void flush_icache_deferred(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with a barrier in flush_icache_mm.
		 */
		smp_mb();
		local_flush_icache_all();
	}
#endif
}

static inline void switch_mm(struct mm_struct *prev,
	struct mm_struct *next, struct task_struct *task)
{
	if (likely(prev != next)) {
		/*
		 * Mark the current MM context as inactive, and the next as
		 * active.  This is at least used by the icache flushing
		 * routines in order to determine which harts should receive
		 * an IPI when a remote icache flush is required.
		 */
		unsigned int cpu = smp_processor_id();

		cpumask_clear_cpu(cpu, mm_cpumask(prev));
		cpumask_set_cpu(cpu, mm_cpumask(next));

		set_pgdir(next->pgd);
		local_flush_tlb_all();

		flush_icache_deferred(next);
	}
}

static inline void activate_mm(struct mm_struct *prev,
	struct mm_struct *next)
{
	switch_mm(prev, next, NULL);
}

static inline void deactivate_mm(struct task_struct *task,
	struct mm_struct *mm)
{
}

#endif /* _ASM_RISCV_MMU_CONTEXT_H */
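
/*
 * For reference: a sketch of the sender side that the smp_mb() in
 * flush_icache_deferred() pairs with.  The real flush_icache_mm() lives in
 * arch/riscv/mm/cacheflush.c; the outline below is illustrative only and
 * simplifies the actual logic (e.g. the handling of the 'local' hint).
 */
#if 0	/* illustrative sketch, not compiled */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

	/* Mark every hart's icache as stale for this MM context... */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);

	/* ...then flush this hart's icache and mark it clean again. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Harts currently executing this MM context get an immediate remote
	 * fence.i; everyone else keeps their stale bit set and performs the
	 * deferred flush in flush_icache_deferred() when they next switch to
	 * this MM.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	if (!cpumask_empty(&others)) {
		sbi_remote_fence_i(others.bits);
	} else {
		/*
		 * No remote fence was sent, so nothing orders the
		 * cpumask_setall() above against a later deferred flush on
		 * another hart.  This barrier pairs with the smp_mb() in
		 * flush_icache_deferred().
		 */
		smp_mb();
	}

	preempt_enable();
}
#endif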
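
/*
 * For reference: current_pgdir() and set_pgdir() rely on the csr_read()/
 * csr_write() macros from <asm/csr.h>, which expand to a single CSR
 * instruction via inline assembly.  The sketch below is illustrative and
 * may differ from the in-tree definitions.
 */
#if 0	/* illustrative sketch, not compiled */
#define csr_read(csr)						\
({								\
	unsigned long __v;					\
	__asm__ __volatile__ ("csrr %0, " #csr			\
			      : "=r" (__v) : : "memory");	\
	__v;							\
})

#define csr_write(csr, val)					\
({								\
	unsigned long __v = (unsigned long)(val);		\
	__asm__ __volatile__ ("csrw " #csr ", %0"		\
			      : : "rK" (__v) : "memory");	\
})
#endif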