/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *   Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *   and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 *   Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *   Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/abs_addr.h>
#include <asm/firmware.h>

#include "mmu_decl.h"

unsigned long ioremap_bot = IOREMAP_BASE;

/*
 * map_io_page is currently only called by __ioremap_at; it adds an entry
 * to the ioremap page table and an entry to the HPT, possibly bolting it.
 */
static int map_io_page(unsigned long ea, unsigned long pa, int flags)
{
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;

        if (mem_init_done) {
                pgdp = pgd_offset_k(ea);
                pudp = pud_alloc(&init_mm, pgdp, ea);
                if (!pudp)
                        return -ENOMEM;
                pmdp = pmd_alloc(&init_mm, pudp, ea);
                if (!pmdp)
                        return -ENOMEM;
                ptep = pte_alloc_kernel(pmdp, ea);
                if (!ptep)
                        return -ENOMEM;
                set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
                                                       __pgprot(flags)));
        } else {
                /*
                 * If the mm subsystem is not fully up, we cannot create a
                 * linux page table entry for this mapping.  Simply bolt an
                 * entry in the hardware page table.
                 */
                if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
                                      mmu_io_psize, mmu_kernel_ssize)) {
                        printk(KERN_ERR "Failed to do bolted mapping IO "
                               "memory at %016lx !\n", pa);
                        return -ENOMEM;
                }
        }
        return 0;
}


/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
                            unsigned long flags)
{
        unsigned long i;

        /* Make sure we have the base flags */
        if ((flags & _PAGE_PRESENT) == 0)
                flags |= pgprot_val(PAGE_KERNEL);

        /* Non-cacheable page cannot be coherent */
        if (flags & _PAGE_NO_CACHE)
                flags &= ~_PAGE_COHERENT;

        /* We don't support the 4K PFN hack with ioremap */
        if (flags & _PAGE_4K_PFN)
                return NULL;

        WARN_ON(pa & ~PAGE_MASK);
        WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
        WARN_ON(size & ~PAGE_MASK);

        for (i = 0; i < size; i += PAGE_SIZE)
                if (map_io_page((unsigned long)ea + i, pa + i, flags))
                        return NULL;

        return (void __iomem *)ea;
}

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
        WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
        WARN_ON(size & ~PAGE_MASK);

        unmap_kernel_range((unsigned long)ea, size);
}
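
/*
 * Illustrative sketch, not part of the original file: __ioremap_at() and
 * __iounmap_at() serve callers that manage the virtual address space
 * themselves, e.g. PCI code mapping a host bridge's IO window and later
 * unmapping only part of it.  The example below is hypothetical and
 * compiled out; only the two functions and the flag values are taken
 * from this file.
 */
#if 0
static int example_manual_io_mapping(phys_addr_t io_phys, void *io_virt,
                                     unsigned long size)
{
        /* Establish page tables at a caller-chosen virtual address */
        if (__ioremap_at(io_phys, io_virt, size,
                         _PAGE_NO_CACHE | _PAGE_GUARDED) == NULL)
                return -ENOMEM;

        /* Partial teardown: unmap just the first page of the window */
        __iounmap_at(io_virt, PAGE_SIZE);
        return 0;
}
#endif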

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
                         unsigned long flags)
{
        phys_addr_t paligned;
        void __iomem *ret;

        /*
         * Choose an address to map it to.  Once the vmalloc system is
         * running, we use it.  Before that, we map using addresses going
         * up from ioremap_bot.  vmalloc will use the addresses from
         * ioremap_bot through IOREMAP_END.
         */
        paligned = addr & PAGE_MASK;
        size = PAGE_ALIGN(addr + size) - paligned;

        if ((size == 0) || (paligned == 0))
                return NULL;

        if (mem_init_done) {
                struct vm_struct *area;

                area = __get_vm_area(size, VM_IOREMAP,
                                     ioremap_bot, IOREMAP_END);
                if (area == NULL)
                        return NULL;
                ret = __ioremap_at(paligned, area->addr, size, flags);
                if (!ret)
                        vunmap(area->addr);
        } else {
                ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
                if (ret)
                        ioremap_bot += size;
        }

        if (ret)
                ret += addr & ~PAGE_MASK;
        return ret;
}


void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
        unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;

        if (ppc_md.ioremap)
                return ppc_md.ioremap(addr, size, flags);
        return __ioremap(addr, size, flags);
}

void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
                             unsigned long flags)
{
        /* writeable implies dirty for kernel addresses */
        if (flags & _PAGE_RW)
                flags |= _PAGE_DIRTY;

        /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
        flags &= ~(_PAGE_USER | _PAGE_EXEC);

        if (ppc_md.ioremap)
                return ppc_md.ioremap(addr, size, flags);
        return __ioremap(addr, size, flags);
}
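
/*
 * Illustrative sketch, not part of the original file: the typical driver
 * path uses ioremap()/iounmap() and touches the mapping only through the
 * MMIO accessors.  The physical address and register offset below are
 * hypothetical; the block is compiled out.
 */
#if 0
static int example_driver_probe(phys_addr_t regs_phys)
{
        void __iomem *regs;

        /* Guarded, non-cacheable mapping of one page of registers */
        regs = ioremap(regs_phys, PAGE_SIZE);
        if (!regs)
                return -ENOMEM;

        out_be32(regs + 0x10, 0x1);     /* hypothetical device register */

        iounmap(regs);
        return 0;
}
#endif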

/*
 * Unmap an IO region and remove it from the vmalloc'd list.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
        void *addr;

        if (!mem_init_done)
                return;

        addr = (void *) ((unsigned long __force)
                         PCI_FIX_ADDR(token) & PAGE_MASK);
        if ((unsigned long)addr < ioremap_bot) {
                printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
                       " at 0x%p\n", addr);
                return;
        }
        vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
        if (ppc_md.iounmap)
                ppc_md.iounmap(token);
        else
                __iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_flags);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);
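
/*
 * Illustrative sketch, not part of the original file: a platform can
 * interpose on the whole ioremap path through the ppc_md hooks checked
 * above.  The ppc_md.ioremap/ppc_md.iounmap fields are real (their
 * signatures follow from the call sites in this file); the example
 * functions are hypothetical and compiled out.
 */
#if 0
static void __iomem *example_plat_ioremap(phys_addr_t addr,
                                          unsigned long size,
                                          unsigned long flags)
{
        /* A platform might translate addr or force extra flags here */
        return __ioremap(addr, size, flags);
}

static void example_plat_iounmap(volatile void __iomem *token)
{
        __iounmap(token);
}

static void __init example_plat_setup(void)
{
        ppc_md.ioremap = example_plat_ioremap;
        ppc_md.iounmap = example_plat_iounmap;
}
#endif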