#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	p4d = p4d_offset(pgd, vaddr);
	if (p4d_none(*p4d)) {
		BUG();
		return;
	}
	pud = pud_offset(p4d, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(pteval))
		set_pte_at(&init_mm, vaddr, pte, pteval);
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the
 * vmalloc area - the default is 128m.
 */
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;

	/* Add VMALLOC_OFFSET to the parsed value due to vm area guard hole */
	__VMALLOC_RESERVE = memparse(arg, &arg) + VMALLOC_OFFSET;
	return 0;
}
early_param("vmalloc", parse_vmalloc);

/*
 * reservetop=size reserves a hole at the top of the kernel address space which
 * a hypervisor can load into later. Needed for dynamically loaded hypervisors,
 * so relocating the fixmap can be done before paging initialization.
 */
static int __init parse_reservetop(char *arg)
{
	unsigned long address;

	if (!arg)
		return -EINVAL;

	address = memparse(arg, &arg);
	reserve_top_address(address);
	early_ioremap_init();
	return 0;
}
early_param("reservetop", parse_reservetop);
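
/*
 * Usage sketch for the two early_param() handlers above, with illustrative
 * values only; memparse() accepts the usual K/M/G size suffixes:
 *
 *     vmalloc=192M      force a 192 MiB vmalloc area (VMALLOC_OFFSET is
 *                       added on top to preserve the guard hole)
 *     reservetop=32M    leave a 32 MiB hole at the top of the kernel
 *                       address space for a hypervisor to load into
 */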