// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * BIOS run time interface routines.
 *
 * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
 * Copyright (c) Russ Anderson <rja@sgi.com>
 */

#include <linux/efi.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <asm/efi.h>
#include <linux/io.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv_hub.h>

unsigned long uv_systab_phys __ro_after_init = EFI_INVALID_TABLE_ADDR;

struct uv_systab *uv_systab;

static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
			  u64 a4, u64 a5)
{
	struct uv_systab *tab = uv_systab;
	s64 ret;

	if (!tab || !tab->function)
		/*
		 * BIOS does not support UV systab
		 */
		return BIOS_STATUS_UNIMPLEMENTED;

	/*
	 * If EFI_UV1_MEMMAP is set, we need to fall back to using our old EFI
	 * callback method, which uses efi_call() directly, with the kernel page tables:
	 */
	if (unlikely(efi_enabled(EFI_UV1_MEMMAP))) {
		kernel_fpu_begin();
		ret = efi_call((void *)__va(tab->function), (u64)which, a1, a2, a3, a4, a5);
		kernel_fpu_end();
	} else {
		ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
	}

	return ret;
}

s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
{
	s64 ret;

	if (down_interruptible(&__efi_uv_runtime_lock))
		return BIOS_STATUS_ABORT;

	ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
	up(&__efi_uv_runtime_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(uv_bios_call);

s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
			 u64 a4, u64 a5)
{
	unsigned long bios_flags;
	s64 ret;

	if (down_interruptible(&__efi_uv_runtime_lock))
		return BIOS_STATUS_ABORT;

	local_irq_save(bios_flags);
	ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
	local_irq_restore(bios_flags);

	up(&__efi_uv_runtime_lock);

	return ret;
}


long sn_partition_id;
EXPORT_SYMBOL_GPL(sn_partition_id);
long sn_coherency_id;
EXPORT_SYMBOL_GPL(sn_coherency_id);
long sn_region_size;
EXPORT_SYMBOL_GPL(sn_region_size);
long system_serial_number;
EXPORT_SYMBOL_GPL(system_serial_number);
int uv_type;
EXPORT_SYMBOL_GPL(uv_type);


s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
			long *region, long *ssn)
{
	s64 ret;
	u64 v0, v1;
	union partition_info_u part;

	ret = uv_bios_call_irqsave(UV_BIOS_GET_SN_INFO, fc,
				   (u64)(&v0), (u64)(&v1), 0, 0);
	if (ret != BIOS_STATUS_SUCCESS)
		return ret;

	part.val = v0;
	if (uvtype)
		*uvtype = part.hub_version;
	if (partid)
		*partid = part.partition_id;
	if (coher)
		*coher = part.coherence_id;
	if (region)
		*region = part.region_size;
	if (ssn)
		*ssn = v1;
	return ret;
}
EXPORT_SYMBOL_GPL(uv_bios_get_sn_info);
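
/*
 * Illustrative only, not part of the original file: a minimal sketch of how
 * a caller might use uv_bios_get_sn_info().  Any of the pointer arguments
 * may be NULL when the caller does not need that field; the example function
 * name below is hypothetical.
 */
static void __maybe_unused uv_bios_sn_info_example(void)
{
	int hub_type;
	long partid, coher, region, ssn;

	if (uv_bios_get_sn_info(0, &hub_type, &partid, &coher,
				&region, &ssn) != BIOS_STATUS_SUCCESS)
		return;

	pr_info("UV: hub:%d partition:%ld coherence:%ld region:%ld\n",
		hub_type, partid, coher, region);
}
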
int
uv_bios_mq_watchlist_alloc(unsigned long addr, unsigned int mq_size,
			   unsigned long *intr_mmr_offset)
{
	u64 watchlist;
	s64 ret;

	/*
	 * bios returns watchlist number or negative error number.
	 */
	ret = (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_ALLOC, addr,
					mq_size, (u64)intr_mmr_offset,
					(u64)&watchlist, 0);
	if (ret < BIOS_STATUS_SUCCESS)
		return ret;

	return watchlist;
}
EXPORT_SYMBOL_GPL(uv_bios_mq_watchlist_alloc);

int
uv_bios_mq_watchlist_free(int blade, int watchlist_num)
{
	return (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_FREE,
					 blade, watchlist_num, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(uv_bios_mq_watchlist_free);

s64
uv_bios_change_memprotect(u64 paddr, u64 len, enum uv_memprotect perms)
{
	return uv_bios_call_irqsave(UV_BIOS_MEMPROTECT, paddr, len,
				    perms, 0, 0);
}
EXPORT_SYMBOL_GPL(uv_bios_change_memprotect);

s64
uv_bios_reserved_page_pa(u64 buf, u64 *cookie, u64 *addr, u64 *len)
{
	return uv_bios_call_irqsave(UV_BIOS_GET_PARTITION_ADDR, (u64)cookie,
				    (u64)addr, buf, (u64)len, 0);
}
EXPORT_SYMBOL_GPL(uv_bios_reserved_page_pa);

s64 uv_bios_freq_base(u64 clock_type, u64 *ticks_per_second)
{
	return uv_bios_call(UV_BIOS_FREQ_BASE, clock_type,
			    (u64)ticks_per_second, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(uv_bios_freq_base);
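
/*
 * Illustrative only, not part of the original file: a minimal sketch of
 * querying the base frequency of one of the UV clocks.  It assumes the
 * BIOS_FREQ_BASE_REALTIME_CLOCK clock-type constant from <asm/uv/bios.h>;
 * the example function itself is hypothetical.
 */
static void __maybe_unused uv_bios_freq_base_example(void)
{
	u64 ticks_per_sec;

	if (uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
			      &ticks_per_sec) != BIOS_STATUS_SUCCESS) {
		pr_err("UV: RTC frequency query failed\n");
		return;
	}

	pr_info("UV: RTC base frequency: %llu ticks/sec\n", ticks_per_sec);
}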

/*
 * uv_bios_set_legacy_vga_target - Set Legacy VGA I/O Target
 * @decode: true to enable target, false to disable target
 * @domain: PCI domain number
 * @bus: PCI bus number
 *
 * Returns:
 *    0: Success
 *    -EINVAL: Invalid domain or bus number
 *    -ENOSYS: Capability not available
 *    -EBUSY: Legacy VGA I/O cannot be retargeted at this time
 */
int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus)
{
	return uv_bios_call(UV_BIOS_SET_LEGACY_VGA_TARGET,
			    (u64)decode, (u64)domain, (u64)bus, 0, 0);
}
EXPORT_SYMBOL_GPL(uv_bios_set_legacy_vga_target);

int uv_bios_init(void)
{
	uv_systab = NULL;
	if ((uv_systab_phys == EFI_INVALID_TABLE_ADDR) ||
	    !uv_systab_phys || efi_runtime_disabled()) {
		pr_crit("UV: UVsystab: missing\n");
		return -EEXIST;
	}

	uv_systab = ioremap(uv_systab_phys, sizeof(struct uv_systab));
	if (!uv_systab || strncmp(uv_systab->signature, UV_SYSTAB_SIG, 4)) {
		pr_err("UV: UVsystab: bad signature!\n");
		iounmap(uv_systab);
		return -EINVAL;
	}

	/* Starting with UV4 the UV systab size is variable */
	if (uv_systab->revision >= UV_SYSTAB_VERSION_UV4) {
		int size = uv_systab->size;

		iounmap(uv_systab);
		uv_systab = ioremap(uv_systab_phys, size);
		if (!uv_systab) {
			pr_err("UV: UVsystab: ioremap(%d) failed!\n", size);
			return -EFAULT;
		}
	}
	pr_info("UV: UVsystab: Revision:%x\n", uv_systab->revision);
	return 0;
}

static void __init early_code_mapping_set_exec(int executable)
{
	efi_memory_desc_t *md;

	if (!(__supported_pte_mask & _PAGE_NX))
		return;

	/* Make EFI service code area executable */
	for_each_efi_memory_desc(md) {
		if (md->type == EFI_RUNTIME_SERVICES_CODE ||
		    md->type == EFI_BOOT_SERVICES_CODE)
			efi_set_executable(md, executable);
	}
}

void __init efi_uv1_memmap_phys_epilog(pgd_t *save_pgd)
{
	/*
	 * After the lock is released, the original page table is restored.
	 */
	int pgd_idx, i;
	int nr_pgds;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);

	for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
		pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
		set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);

		if (!pgd_present(*pgd))
			continue;

		for (i = 0; i < PTRS_PER_P4D; i++) {
			p4d = p4d_offset(pgd,
					 pgd_idx * PGDIR_SIZE + i * P4D_SIZE);

			if (!p4d_present(*p4d))
				continue;

			pud = (pud_t *)p4d_page_vaddr(*p4d);
			pud_free(&init_mm, pud);
		}

		p4d = (p4d_t *)pgd_page_vaddr(*pgd);
		p4d_free(&init_mm, p4d);
	}

	kfree(save_pgd);

	__flush_tlb_all();
	early_code_mapping_set_exec(0);
}

pgd_t * __init efi_uv1_memmap_phys_prolog(void)
{
	unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
	pgd_t *save_pgd, *pgd_k, *pgd_efi;
	p4d_t *p4d, *p4d_k, *p4d_efi;
	pud_t *pud;

	int pgd;
	int n_pgds, i, j;

	early_code_mapping_set_exec(1);

	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
	save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);
	if (!save_pgd)
		return NULL;

	/*
	 * Build 1:1 identity mapping for UV1 memmap usage. Note that
	 * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while
	 * it is PUD_SIZE ALIGNED with KASLR enabled. So for a given physical
	 * address X, the pud_index(X) != pud_index(__va(X)), we can only copy
	 * PUD entry of __va(X) to fill in pud entry of X to build 1:1 mapping.
	 * This means here we can only reuse the PMD tables of the direct mapping.
	 */
	for (pgd = 0; pgd < n_pgds; pgd++) {
		addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
		vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
		pgd_efi = pgd_offset_k(addr_pgd);
		save_pgd[pgd] = *pgd_efi;

		p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
		if (!p4d) {
			pr_err("Failed to allocate p4d table!\n");
			goto out;
		}

		for (i = 0; i < PTRS_PER_P4D; i++) {
			addr_p4d = addr_pgd + i * P4D_SIZE;
			p4d_efi = p4d + p4d_index(addr_p4d);

			pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
			if (!pud) {
				pr_err("Failed to allocate pud table!\n");
				goto out;
			}

			for (j = 0; j < PTRS_PER_PUD; j++) {
				addr_pud = addr_p4d + j * PUD_SIZE;

				if (addr_pud > (max_pfn << PAGE_SHIFT))
					break;

				vaddr = (unsigned long)__va(addr_pud);

				pgd_k = pgd_offset_k(vaddr);
				p4d_k = p4d_offset(pgd_k, vaddr);
				pud[j] = *pud_offset(p4d_k, vaddr);
			}
		}
		pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX;
	}

	__flush_tlb_all();
	return save_pgd;
out:
	efi_uv1_memmap_phys_epilog(save_pgd);
	return NULL;
}
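
/*
 * Illustrative only, not part of the original file: a hypothetical sketch of
 * how the prolog/epilog pair above is meant to bracket work that needs the
 * temporary 1:1 physical mapping.  uv1_phys_mapping_example() does not exist
 * in the kernel.
 */
static int __init __maybe_unused uv1_phys_mapping_example(void)
{
	pgd_t *save_pgd;

	save_pgd = efi_uv1_memmap_phys_prolog();	/* build the 1:1 mapping */
	if (!save_pgd)
		return -ENOMEM;

	/* ... work that relies on identity-mapped physical addresses ... */

	efi_uv1_memmap_phys_epilog(save_pgd);		/* restore the page tables */
	return 0;
}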

void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
				 u32 type, u64 attribute)
{
	unsigned long last_map_pfn;

	if (type == EFI_MEMORY_MAPPED_IO)
		return ioremap(phys_addr, size);

	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size,
					   PAGE_KERNEL);
	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
		unsigned long top = last_map_pfn << PAGE_SHIFT;
		efi_ioremap(top, size - (top - phys_addr), type, attribute);
	}

	if (!(attribute & EFI_MEMORY_WB))
		efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);

	return (void __iomem *)__va(phys_addr);
}

static int __init arch_parse_efi_cmdline(char *str)
{
	if (!str) {
		pr_warn("need at least one option\n");
		return -EINVAL;
	}

	if (!efi_is_mixed() && parse_option_str(str, "old_map"))
		set_bit(EFI_UV1_MEMMAP, &efi.flags);

	return 0;
}
early_param("efi", arch_parse_efi_cmdline);
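
/*
 * Note (added commentary, not in the original file): with the command-line
 * parser above, booting a non-mixed-mode EFI system with "efi=old_map" sets
 * EFI_UV1_MEMMAP, which makes __uv_bios_call() take the efi_call() path with
 * the kernel page tables instead of efi_call_virt_pointer().
 */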