// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
 */

#include <linux/of.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_SMP

#include <asm/sbi.h>

/* IPI handler: flush this hart's instruction cache. */
static void ipi_remote_fence_i(void *info)
{
	local_flush_icache_all();
}

void flush_icache_all(void)
{
	local_flush_icache_all();

	if (IS_ENABLED(CONFIG_RISCV_SBI) && !riscv_use_ipi_for_rfence())
		sbi_remote_fence_i(NULL);
	else
		on_each_cpu(ipi_remote_fence_i, NULL, 1);
}
EXPORT_SYMBOL(flush_icache_all);

/*
 * Performs an icache flush for the given MM context.  RISC-V has no direct
 * mechanism for instruction cache shoot downs, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, i.e. 'make -j') we avoid the
 * IPIs for harts that are not currently executing a MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	local |= cpumask_empty(&others);
	if (mm == current->active_mm && local) {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart.  Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	} else if (IS_ENABLED(CONFIG_RISCV_SBI) &&
		   !riscv_use_ipi_for_rfence()) {
		sbi_remote_fence_i(&others);
	} else {
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
	}

	preempt_enable();
}

#endif /* CONFIG_SMP */
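
/*
 * For reference, the deferred flush scheduled above is consumed on the
 * context-switch path; flush_icache_mm()'s comment refers to it as
 * flush_icache_deferred().  A minimal sketch of such a consumer, assuming
 * only the icache_stale_mask protocol described above (illustrative, not a
 * verbatim copy of the real implementation):
 *
 *	static inline void flush_icache_deferred(struct mm_struct *mm,
 *						 unsigned int cpu)
 *	{
 *		cpumask_t *mask = &mm->context.icache_stale_mask;
 *
 *		if (cpumask_test_cpu(cpu, mask)) {
 *			cpumask_clear_cpu(cpu, mask);
 *			smp_mb();	// pairs with the barrier above
 *			local_flush_icache_all();
 *		}
 *	}
 */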

#ifdef CONFIG_MMU
void flush_icache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);

	/*
	 * HugeTLB pages are always fully mapped, so only setting the head
	 * page's PG_dcache_clean flag is enough.
	 */
	if (PageHuge(page))
		page = compound_head(page);

	if (!test_bit(PG_dcache_clean, &page->flags)) {
		flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
#endif /* CONFIG_MMU */

unsigned int riscv_cbom_block_size;
EXPORT_SYMBOL_GPL(riscv_cbom_block_size);

/*
 * Probe the Zicbom cache-block size from the devicetree.  Every cpu node is
 * expected to report the same "riscv,cbom-block-size"; on a mismatch the
 * first probed value wins and a warning is printed.
 */
void riscv_init_cbom_blocksize(void)
{
	struct device_node *node;
	unsigned long cbom_hartid;
	u32 val, probed_block_size;
	int ret;

	probed_block_size = 0;
	for_each_of_cpu_node(node) {
		unsigned long hartid;

		ret = riscv_of_processor_hartid(node, &hartid);
		if (ret)
			continue;

		/* Set the block size for the cbom extension, if reported. */
		ret = of_property_read_u32(node, "riscv,cbom-block-size", &val);
		if (ret)
			continue;

		if (!probed_block_size) {
			probed_block_size = val;
			cbom_hartid = hartid;
		} else {
			if (probed_block_size != val)
				pr_warn("cbom-block-size mismatched between harts %lu and %lu\n",
					cbom_hartid, hartid);
		}
	}

	if (probed_block_size)
		riscv_cbom_block_size = probed_block_size;
}
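
/*
 * The "riscv,cbom-block-size" property probed above comes from the devicetree
 * cpu nodes.  An illustrative (not normative) fragment, assuming a
 * hypothetical 64-byte block size:
 *
 *	cpu@0 {
 *		compatible = "riscv";
 *		riscv,cbom-block-size = <64>;
 *		...
 *	};
 */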