/*
 * Trapped io support
 *
 * Copyright (C) 2008 Magnus Damm
 *
 * Intercept io operations by trapping.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/io_trapped.h>

#define TRAPPED_PAGES_MAX 16

#ifdef CONFIG_HAS_IOPORT
LIST_HEAD(trapped_io);
EXPORT_SYMBOL_GPL(trapped_io);
#endif
#ifdef CONFIG_HAS_IOMEM
LIST_HEAD(trapped_mem);
EXPORT_SYMBOL_GPL(trapped_mem);
#endif
static DEFINE_SPINLOCK(trapped_lock);

int register_trapped_io(struct trapped_io *tiop)
{
	struct resource *res;
	unsigned long len = 0, flags = 0;
	struct page *pages[TRAPPED_PAGES_MAX];
	int k, n;

	/* structure must be page aligned */
	if ((unsigned long)tiop & (PAGE_SIZE - 1))
		goto bad;

	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		len += roundup((res->end - res->start) + 1, PAGE_SIZE);
		flags |= res->flags;
	}

	/* support IORESOURCE_IO _or_ MEM, not both */
	if (hweight_long(flags) != 1)
		goto bad;

	n = len >> PAGE_SHIFT;

	if (n >= TRAPPED_PAGES_MAX)
		goto bad;

	/*
	 * Back every page of the window with the page holding the tiop
	 * structure itself, mapped PAGE_NONE: any access to the window
	 * faults, and lookup_tiop() can recover the tiop from the pte
	 * of the faulting address.
	 */
	for (k = 0; k < n; k++)
		pages[k] = virt_to_page(tiop);

	tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE);
	if (!tiop->virt_base)
		goto bad;

	len = 0;
	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n",
			(unsigned long)(tiop->virt_base + len),
			res->flags & IORESOURCE_IO ? "io" : "mmio",
			(unsigned long)res->start);
		len += roundup((res->end - res->start) + 1, PAGE_SIZE);
	}

	tiop->magic = IO_TRAPPED_MAGIC;
	INIT_LIST_HEAD(&tiop->list);
	spin_lock_irq(&trapped_lock);
	if (flags & IORESOURCE_IO)
		list_add(&tiop->list, &trapped_io);
	if (flags & IORESOURCE_MEM)
		list_add(&tiop->list, &trapped_mem);
	spin_unlock_irq(&trapped_lock);

	return 0;

bad:
	pr_warning("unable to install trapped io filter\n");
	return -1;
}
EXPORT_SYMBOL_GPL(register_trapped_io);
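/*
 * Registration sketch (illustrative only; the names and values below are
 * made up, not taken from a real board file). A platform page-aligns a
 * struct trapped_io describing the windows it wants intercepted:
 *
 *	static struct resource foo_resources[] = {
 *		[0] = {
 *			.start	= 0xa8000000,
 *			.end	= 0xa8000000 + 0x1000 - 1,
 *			.flags	= IORESOURCE_MEM,
 *		},
 *	};
 *
 *	static struct trapped_io foo_tiop
 *		__attribute__ ((__aligned__(PAGE_SIZE))) = {
 *		.resource		= foo_resources,
 *		.num_resources		= ARRAY_SIZE(foo_resources),
 *		.minimum_bus_width	= 16,
 *	};
 *
 * After a successful register_trapped_io(&foo_tiop), accesses through
 * foo_tiop.virt_base fault and are replayed with 16-bit minimum width.
 */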
"io" : "mmio", 74 (unsigned long)res->start); 75 len += roundup((res->end - res->start) + 1, PAGE_SIZE); 76 } 77 78 tiop->magic = IO_TRAPPED_MAGIC; 79 INIT_LIST_HEAD(&tiop->list); 80 spin_lock_irq(&trapped_lock); 81 if (flags & IORESOURCE_IO) 82 list_add(&tiop->list, &trapped_io); 83 if (flags & IORESOURCE_MEM) 84 list_add(&tiop->list, &trapped_mem); 85 spin_unlock_irq(&trapped_lock); 86 87 return 0; 88 bad: 89 pr_warning("unable to install trapped io filter\n"); 90 return -1; 91 } 92 EXPORT_SYMBOL_GPL(register_trapped_io); 93 94 void __iomem *match_trapped_io_handler(struct list_head *list, 95 unsigned long offset, 96 unsigned long size) 97 { 98 unsigned long voffs; 99 struct trapped_io *tiop; 100 struct resource *res; 101 int k, len; 102 103 spin_lock_irq(&trapped_lock); 104 list_for_each_entry(tiop, list, list) { 105 voffs = 0; 106 for (k = 0; k < tiop->num_resources; k++) { 107 res = tiop->resource + k; 108 if (res->start == offset) { 109 spin_unlock_irq(&trapped_lock); 110 return tiop->virt_base + voffs; 111 } 112 113 len = (res->end - res->start) + 1; 114 voffs += roundup(len, PAGE_SIZE); 115 } 116 } 117 spin_unlock_irq(&trapped_lock); 118 return NULL; 119 } 120 EXPORT_SYMBOL_GPL(match_trapped_io_handler); 121 122 static struct trapped_io *lookup_tiop(unsigned long address) 123 { 124 pgd_t *pgd_k; 125 pud_t *pud_k; 126 pmd_t *pmd_k; 127 pte_t *pte_k; 128 pte_t entry; 129 130 pgd_k = swapper_pg_dir + pgd_index(address); 131 if (!pgd_present(*pgd_k)) 132 return NULL; 133 134 pud_k = pud_offset(pgd_k, address); 135 if (!pud_present(*pud_k)) 136 return NULL; 137 138 pmd_k = pmd_offset(pud_k, address); 139 if (!pmd_present(*pmd_k)) 140 return NULL; 141 142 pte_k = pte_offset_kernel(pmd_k, address); 143 entry = *pte_k; 144 145 return pfn_to_kaddr(pte_pfn(entry)); 146 } 147 148 static unsigned long lookup_address(struct trapped_io *tiop, 149 unsigned long address) 150 { 151 struct resource *res; 152 unsigned long vaddr = (unsigned long)tiop->virt_base; 153 unsigned long len; 154 int k; 155 156 for (k = 0; k < tiop->num_resources; k++) { 157 res = tiop->resource + k; 158 len = roundup((res->end - res->start) + 1, PAGE_SIZE); 159 if (address < (vaddr + len)) 160 return res->start + (address - vaddr); 161 vaddr += len; 162 } 163 return 0; 164 } 165 166 static unsigned long long copy_word(unsigned long src_addr, int src_len, 167 unsigned long dst_addr, int dst_len) 168 { 169 unsigned long long tmp = 0; 170 171 switch (src_len) { 172 case 1: 173 tmp = ctrl_inb(src_addr); 174 break; 175 case 2: 176 tmp = ctrl_inw(src_addr); 177 break; 178 case 4: 179 tmp = ctrl_inl(src_addr); 180 break; 181 case 8: 182 tmp = ctrl_inq(src_addr); 183 break; 184 } 185 186 switch (dst_len) { 187 case 1: 188 ctrl_outb(tmp, dst_addr); 189 break; 190 case 2: 191 ctrl_outw(tmp, dst_addr); 192 break; 193 case 4: 194 ctrl_outl(tmp, dst_addr); 195 break; 196 case 8: 197 ctrl_outq(tmp, dst_addr); 198 break; 199 } 200 201 return tmp; 202 } 203 204 static unsigned long from_device(void *dst, const void *src, unsigned long cnt) 205 { 206 struct trapped_io *tiop; 207 unsigned long src_addr = (unsigned long)src; 208 unsigned long long tmp; 209 210 pr_debug("trapped io read 0x%08lx (%ld)\n", src_addr, cnt); 211 tiop = lookup_tiop(src_addr); 212 WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC)); 213 214 src_addr = lookup_address(tiop, src_addr); 215 if (!src_addr) 216 return cnt; 217 218 tmp = copy_word(src_addr, 219 max_t(unsigned long, cnt, 220 (tiop->minimum_bus_width / 8)), 221 (unsigned long)dst, cnt); 222 223 
pr_debug("trapped io read 0x%08lx -> 0x%08llx\n", src_addr, tmp); 224 return 0; 225 } 226 227 static unsigned long to_device(void *dst, const void *src, unsigned long cnt) 228 { 229 struct trapped_io *tiop; 230 unsigned long dst_addr = (unsigned long)dst; 231 unsigned long long tmp; 232 233 pr_debug("trapped io write 0x%08lx (%ld)\n", dst_addr, cnt); 234 tiop = lookup_tiop(dst_addr); 235 WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC)); 236 237 dst_addr = lookup_address(tiop, dst_addr); 238 if (!dst_addr) 239 return cnt; 240 241 tmp = copy_word((unsigned long)src, cnt, 242 dst_addr, max_t(unsigned long, cnt, 243 (tiop->minimum_bus_width / 8))); 244 245 pr_debug("trapped io write 0x%08lx -> 0x%08llx\n", dst_addr, tmp); 246 return 0; 247 } 248 249 static struct mem_access trapped_io_access = { 250 from_device, 251 to_device, 252 }; 253 254 int handle_trapped_io(struct pt_regs *regs, unsigned long address) 255 { 256 mm_segment_t oldfs; 257 opcode_t instruction; 258 int tmp; 259 260 if (!lookup_tiop(address)) 261 return 0; 262 263 WARN_ON(user_mode(regs)); 264 265 oldfs = get_fs(); 266 set_fs(KERNEL_DS); 267 if (copy_from_user(&instruction, (void *)(regs->pc), 268 sizeof(instruction))) { 269 set_fs(oldfs); 270 return 0; 271 } 272 273 tmp = handle_unaligned_access(instruction, regs, &trapped_io_access); 274 set_fs(oldfs); 275 return tmp == 0; 276 } 277