// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2014 Darius Rad <darius@bluespec.com>
 * Copyright (C) 2017 SiFive
 */

#include <linux/syscalls.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/sbi.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm-generic/mman-common.h>
#include <vdso/vsyscall.h>

static long riscv_sys_mmap(unsigned long addr, unsigned long len,
			   unsigned long prot, unsigned long flags,
			   unsigned long fd, off_t offset,
			   unsigned long page_shift_offset)
{
	if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
		return -EINVAL;

	return ksys_mmap_pgoff(addr, len, prot, flags, fd,
			       offset >> (PAGE_SHIFT - page_shift_offset));
}

#ifdef CONFIG_64BIT
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, off_t, offset)
{
	return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 0);
}
#endif

#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, off_t, offset)
{
	/*
	 * Note that the shift for mmap2 is constant (12),
	 * regardless of PAGE_SIZE
	 */
	return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 12);
}
#endif

/*
 * Allows the instruction cache to be flushed from userspace. Despite RISC-V
 * having a direct 'fence.i' instruction available to userspace (which we
 * can't trap!), that's not actually viable when running on Linux because the
 * kernel might schedule a process on another hart. There is no way for
 * userspace to handle this without invoking the kernel (as it doesn't know
 * the thread->hart mappings), so we've defined a RISC-V specific system call
 * to flush the instruction cache.
 *
 * sys_riscv_flush_icache() is defined to flush the instruction cache over an
 * address range, with the flush applying to either all threads or just the
 * caller. We don't currently do anything with the address range, that's just
 * in there for forwards compatibility.
 */
SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
		uintptr_t, flags)
{
	/* Check the reserved flags. */
	if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL))
		return -EINVAL;

	flush_icache_mm(current->mm, flags & SYS_RISCV_FLUSH_ICACHE_LOCAL);

	return 0;
}
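
/*
 * Illustrative only, not part of the kernel build: userspace reaches this
 * through syscall(2). A minimal sketch, assuming glibc's syscall() and the
 * __NR_riscv_flush_icache number from the uapi headers; "buf" and "buf_end"
 * are hypothetical bounds of a freshly written code buffer:
 *
 *	syscall(__NR_riscv_flush_icache, (uintptr_t)buf, (uintptr_t)buf_end,
 *		0UL);	(flags == 0 flushes for all threads)
 */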

/*
 * The hwprobe interface, for allowing userspace to probe to see which
 * features are supported by the hardware. See
 * Documentation/riscv/hwprobe.rst for more details.
 */
static void hwprobe_arch_id(struct riscv_hwprobe *pair,
			    const struct cpumask *cpus)
{
	u64 id = -1ULL;
	bool first = true;
	int cpu;

	for_each_cpu(cpu, cpus) {
		u64 cpu_id;

		switch (pair->key) {
		case RISCV_HWPROBE_KEY_MVENDORID:
			cpu_id = riscv_cached_mvendorid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MIMPID:
			cpu_id = riscv_cached_mimpid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MARCHID:
			cpu_id = riscv_cached_marchid(cpu);
			break;
		}

		if (first) {
			id = cpu_id;
			first = false;
		}

		/*
		 * If there's a mismatch for the given set, return -1 in the
		 * value.
		 */
		if (id != cpu_id) {
			id = -1ULL;
			break;
		}
	}

	pair->value = id;
}

static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(misaligned_access_speed, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_UNKNOWN;

	return perf;
}

static void hwprobe_one_pair(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	switch (pair->key) {
	case RISCV_HWPROBE_KEY_MVENDORID:
	case RISCV_HWPROBE_KEY_MARCHID:
	case RISCV_HWPROBE_KEY_MIMPID:
		hwprobe_arch_id(pair, cpus);
		break;
	/*
	 * The kernel already assumes that the base single-letter ISA
	 * extensions are supported on all harts, and only supports the
	 * IMA base, so just cheat a bit here and tell that to
	 * userspace.
	 */
	case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
		pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
		break;

	case RISCV_HWPROBE_KEY_IMA_EXT_0:
		pair->value = 0;
		if (has_fpu())
			pair->value |= RISCV_HWPROBE_IMA_FD;

		if (riscv_isa_extension_available(NULL, c))
			pair->value |= RISCV_HWPROBE_IMA_C;

		break;

	case RISCV_HWPROBE_KEY_CPUPERF_0:
		pair->value = hwprobe_misaligned(cpus);
		break;

	/*
	 * For forward compatibility, unknown keys don't fail the whole
	 * call, but get their element key set to -1 and value set to 0
	 * indicating they're unrecognized.
	 */
	default:
		pair->key = -1;
		pair->value = 0;
		break;
	}
}

static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpu_count,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	size_t out;
	int ret;
	cpumask_t cpus;

	/* Check the reserved flags. */
	if (flags != 0)
		return -EINVAL;

	/*
	 * The interface supports taking in a CPU mask, and returns values
	 * that are consistent across that mask. Allow userspace to specify
	 * NULL and 0 as a shortcut to all online CPUs.
	 */
	cpumask_clear(&cpus);
	if (!cpu_count && !cpus_user) {
		cpumask_copy(&cpus, cpu_online_mask);
	} else {
		if (cpu_count > cpumask_size())
			cpu_count = cpumask_size();

		ret = copy_from_user(&cpus, cpus_user, cpu_count);
		if (ret)
			return -EFAULT;

		/*
		 * Userspace must provide at least one online CPU, without
		 * that there's no way to define what is supported.
		 */
		cpumask_and(&cpus, &cpus, cpu_online_mask);
		if (cpumask_empty(&cpus))
			return -EINVAL;
	}

	for (out = 0; out < pair_count; out++, pairs++) {
		struct riscv_hwprobe pair;

		if (get_user(pair.key, &pairs->key))
			return -EFAULT;

		pair.value = 0;
		hwprobe_one_pair(&pair, &cpus);
		ret = put_user(pair.key, &pairs->key);
		if (ret == 0)
			ret = put_user(pair.value, &pairs->value);

		if (ret)
			return -EFAULT;
	}

	return 0;
}
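
/*
 * Illustrative only, not part of the kernel build: a minimal userspace
 * sketch of the pair encoding, assuming glibc's syscall() and the uapi
 * definitions from <asm/hwprobe.h>. Passing cpu_count == 0 and cpus == NULL
 * selects all online CPUs, per the shortcut above; a key that comes back as
 * -1 was unrecognized by the kernel:
 *
 *	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };
 *
 *	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) == 0 &&
 *	    pair.key != -1 && (pair.value & RISCV_HWPROBE_IMA_C))
 *		use_compressed_code();	(hypothetical caller function)
 */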

#ifdef CONFIG_MMU

static int __init init_hwprobe_vdso_data(void)
{
	struct vdso_data *vd = __arch_get_k_vdso_data();
	struct arch_vdso_data *avd = &vd->arch_data;
	u64 id_bitsmash = 0;
	struct riscv_hwprobe pair;
	int key;

	/*
	 * Initialize vDSO data with the answers for the "all CPUs" case, to
	 * save a syscall in the common case.
	 */
	for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
		pair.key = key;
		hwprobe_one_pair(&pair, cpu_online_mask);

		WARN_ON_ONCE(pair.key < 0);

		avd->all_cpu_hwprobe_values[key] = pair.value;
		/*
		 * Smash together the vendor, arch, and impl IDs to see if
		 * they're all 0 or any negative.
		 */
		if (key <= RISCV_HWPROBE_KEY_MIMPID)
			id_bitsmash |= pair.value;
	}

	/*
	 * If the arch, vendor, and implementation ID are all the same across
	 * all harts, then assume all CPUs are the same, and allow the vDSO to
	 * answer queries for arbitrary masks. However if all values are 0
	 * (not populated) or any value returns -1 (varies across CPUs), then
	 * the vDSO should defer to the kernel for exotic cpu masks.
	 */
	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
	return 0;
}

arch_initcall_sync(init_hwprobe_vdso_data);

#endif /* CONFIG_MMU */

SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
		size_t, pair_count, size_t, cpu_count, unsigned long __user *,
		cpus, unsigned int, flags)
{
	return do_riscv_hwprobe(pairs, pair_count, cpu_count,
				cpus, flags);
}
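
/*
 * Illustrative only, not part of the kernel build: restricting a probe to an
 * explicit CPU set from userspace, reusing "pair" from the sketch above. A
 * sketch assuming glibc's cpu_set_t and CPU_* macros from <sched.h>; the
 * mask is passed by its size in bytes, in the same manner as
 * sched_setaffinity(2):
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(0, &set);
 *	CPU_SET(2, &set);
 *	syscall(__NR_riscv_hwprobe, &pair, 1, sizeof(set),
 *		(unsigned long *)&set, 0);
 */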