#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

/*
 * Bytes reserved for the vmalloc area: defaults to 128 MiB and can be
 * overridden at boot with the "vmalloc=" early parameter (see
 * parse_vmalloc() below).
 */
unsigned int __VMALLOC_RESERVE = 128 << 20;

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 *
 * Walks the kernel page tables (swapper_pg_dir) down to the PTE level.
 * Every intermediate level (pgd/pud/pmd) must already be instantiated;
 * a missing level is a bug, hence BUG().  A non-zero @pteval installs
 * the mapping, an all-zero @pteval clears it.
 */
void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* Zero pte value means "unmap"; anything else is installed as-is. */
	if (pte_val(pteval))
		set_pte_at(&init_mm, vaddr, pte, pteval);
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	/* Install the whole large page with a single PMD entry. */
	set_pmd(pmd, pfn_pmd(pfn, flags));
	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/*
 * Top of the fixmap region.  Exported for modules; presumably lowered
 * by reserve_top_address() when "reservetop=" is used (see
 * parse_reservetop() below) -- confirm against reserve_top_address().
 */
unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the
 * vmalloc area - the default is 128m.
 *
 * Returns 0 on success, -EINVAL when no argument was supplied.
 */
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;

	/* Add VMALLOC_OFFSET to the parsed value due to vm area guard hole*/
	__VMALLOC_RESERVE = memparse(arg, &arg) + VMALLOC_OFFSET;
	return 0;
}
early_param("vmalloc", parse_vmalloc);

/*
 * reservetop=size reserves a hole at the top of the kernel address space which
 * a hypervisor can load into later. Needed for dynamically loaded hypervisors,
 * so relocating the fixmap can be done before paging initialization.
 *
 * Returns 0 on success, -EINVAL when no argument was supplied.
 */
static int __init parse_reservetop(char *arg)
{
	unsigned long address;

	if (!arg)
		return -EINVAL;

	address = memparse(arg, &arg);
	reserve_top_address(address);
	/* Early ioremap mappings must be rebased after the fixmap moves. */
	fixup_early_ioremap();
	return 0;
}
early_param("reservetop", parse_reservetop);