/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/module.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>

#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

/*
 * Fill in one page table's worth of PTEs.  The caller guarantees that
 * the range handed to us does not cross a PMD boundary.
 */
static inline void remap_area_pte(pte_t * pte, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
	                           | __WRITEABLE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	BUG_ON(address >= end);
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk(KERN_ERR "remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

/*
 * Walk the PMD entries covering the range, allocating page tables as
 * needed and handing each PMD-sized chunk down to remap_area_pte().
 */
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	BUG_ON(address >= end);
	do {
		pte_t * pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

/*
 * Install the mapping in init_mm's page tables, one PGDIR-sized chunk
 * at a time, then flush the TLB so no stale translations survive.
 */
static int remap_area_pages(unsigned long address, phys_t phys_addr,
	phys_t size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	BUG_ON(address >= end);
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}
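/*
 * A stand-alone sketch, not part of the original file, of the chunking
 * arithmetic the walkers above rely on: each iteration covers the
 * remainder of the current PMD-sized slot, then rounds up to the next
 * PMD boundary.  SKETCH_PMD_* are made-up stand-ins for the real
 * PMD_SHIFT/PMD_SIZE/PMD_MASK, and the block is userspace code compiled
 * out with #if 0.
 */
#if 0
#include <stdio.h>

#define SKETCH_PMD_SHIFT	22
#define SKETCH_PMD_SIZE		(1UL << SKETCH_PMD_SHIFT)
#define SKETCH_PMD_MASK		(~(SKETCH_PMD_SIZE - 1))

int main(void)
{
	unsigned long address = 0x00700000UL;		/* arbitrary, unaligned start */
	unsigned long end = address + 0x01000000UL;	/* 16MB region */

	/* Same loop shape as remap_area_pmd() above. */
	do {
		unsigned long chunk_end = (address + SKETCH_PMD_SIZE) & SKETCH_PMD_MASK;

		if (chunk_end > end)
			chunk_end = end;
		printf("chunk: %#lx - %#lx\n", address, chunk_end);
		address = (address + SKETCH_PMD_SIZE) & SKETCH_PMD_MASK;
	} while (address && (address < end));
	return 0;
}
#endif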
/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */

#define IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))

void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
	struct vm_struct * area;
	unsigned long offset;
	phys_t last_addr;
	void * addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512mb of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
}

#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

void __iounmap(const volatile void __iomem *addr)
{
	struct vm_struct *p;

	/* KSEG1 addresses were never vmapped, so there is nothing to undo */
	if (IS_KSEG1(addr))
		return;

	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p)
		printk(KERN_ERR "iounmap: bad address %p\n", addr);

	kfree(p);
}

EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__iounmap);
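/*
 * A minimal usage sketch, not part of the original file: the register
 * base, the size and example_read_status() are made-up names for a
 * hypothetical device, and the block is compiled out with #if 0.  It
 * shows the usual map/read/unmap sequence a caller goes through; since
 * this physical address sits in the low 512MB and the mapping is
 * uncached, __ioremap() above simply returns a KSEG1 address.
 */
#if 0
static u32 example_read_status(void)
{
	void __iomem *regs;
	u32 status;

	regs = __ioremap(0x1f000000, 0x100, _CACHE_UNCACHED);
	if (!regs)
		return 0;
	status = readl(regs);	/* MMIO read through the mapping */
	__iounmap(regs);
	return status;
}
#endif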