// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
 */

#include <linux/of.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_SMP

#include <asm/sbi.h>
1258de7754SGary Guo
ipi_remote_fence_i(void * info)138bf90f32SChristoph Hellwig static void ipi_remote_fence_i(void *info)
148bf90f32SChristoph Hellwig {
158bf90f32SChristoph Hellwig return local_flush_icache_all();
168bf90f32SChristoph Hellwig }
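
/*
 * Flush the local instruction cache, then ask every other hart to do the
 * same: via the SBI remote fence when firmware assistance is in use,
 * otherwise by sending an IPI to each CPU.
 */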
void flush_icache_all(void)
{
	local_flush_icache_all();

	if (IS_ENABLED(CONFIG_RISCV_SBI) && !riscv_use_ipi_for_rfence())
		sbi_remote_fence_i(NULL);
	else
		on_each_cpu(ipi_remote_fence_i, NULL, 1);
}
EXPORT_SYMBOL(flush_icache_all);

/*
 * Performs an icache flush for the given MM context. RISC-V has no direct
 * mechanism for instruction cache shootdowns, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, i.e. 'make -j') we avoid the
 * IPIs for harts that are not currently executing an MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	local |= cpumask_empty(&others);
	if (mm == current->active_mm && local) {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart. Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	} else if (IS_ENABLED(CONFIG_RISCV_SBI) &&
		   !riscv_use_ipi_for_rfence()) {
		sbi_remote_fence_i(&others);
	} else {
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
	}

	preempt_enable();
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_MMU
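/*
 * PG_dcache_clean records whether the instruction caches have already been
 * synchronized with this folio: the first mapping of a folio without the
 * flag triggers a global icache flush, after which the folio is marked
 * clean so later mappings can skip the flush.
 */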
void flush_icache_pte(pte_t pte)
{
	struct folio *folio = page_folio(pte_page(pte));

	if (!test_bit(PG_dcache_clean, &folio->flags)) {
		flush_icache_all();
		set_bit(PG_dcache_clean, &folio->flags);
	}
}
#endif /* CONFIG_MMU */
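
/*
 * Block size, in bytes, of the Zicbom cache-block management operations,
 * as probed from the devicetree.
 */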
unsigned int riscv_cbom_block_size;
EXPORT_SYMBOL_GPL(riscv_cbom_block_size);
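
/*
 * Block size, in bytes, of the Zicboz cache-block zero operations, as
 * probed from the devicetree.
 */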
unsigned int riscv_cboz_block_size;
EXPORT_SYMBOL_GPL(riscv_cboz_block_size);
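
/*
 * Read the named block-size property from a CPU node. The first hart that
 * reports a value determines the block size; any later hart that disagrees
 * is warned about, but its value is ignored.
 */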
static void __init cbo_get_block_size(struct device_node *node,
				      const char *name, u32 *block_size,
				      unsigned long *first_hartid)
{
	unsigned long hartid;
	u32 val;

	if (riscv_of_processor_hartid(node, &hartid))
		return;

	if (of_property_read_u32(node, name, &val))
		return;

	if (!*block_size) {
		*block_size = val;
		*first_hartid = hartid;
	} else if (*block_size != val) {
		pr_warn("%s mismatched between harts %lu and %lu\n",
			name, *first_hartid, hartid);
	}
}
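
/*
 * Probe the Zicbom and Zicboz block sizes from the devicetree. Each CPU
 * node may carry either property; a hypothetical node could look like:
 *
 *	cpu@0 {
 *		compatible = "riscv";
 *		riscv,cbom-block-size = <64>;
 *		riscv,cboz-block-size = <64>;
 *	};
 *
 * The probed values are published in riscv_cbom_block_size and
 * riscv_cboz_block_size only if at least one hart reported them.
 */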
void __init riscv_init_cbo_blocksizes(void)
{
	unsigned long cbom_hartid, cboz_hartid;
	u32 cbom_block_size = 0, cboz_block_size = 0;
	struct device_node *node;

	for_each_of_cpu_node(node) {
		/* set block-size for cbom and/or cboz extension if available */
		cbo_get_block_size(node, "riscv,cbom-block-size",
				   &cbom_block_size, &cbom_hartid);
		cbo_get_block_size(node, "riscv,cboz-block-size",
				   &cboz_block_size, &cboz_hartid);
	}

	if (cbom_block_size)
		riscv_cbom_block_size = cbom_block_size;

	if (cboz_block_size)
		riscv_cboz_block_size = cboz_block_size;
}