/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005, 2006, 2007 Paul Mundt
 *
 * P1/P2 Section mapping definitions from map32.h, which was:
 *
 *	Copyright 2003 (c) Lineo Solutions,Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/io.h>

#define NR_PMB_ENTRIES	16

static struct kmem_cache *pmb_cache;
static unsigned long pmb_map;

static struct pmb_entry pmb_init_map[] = {
	/* vpn         ppn         flags (ub/sz/c/wt) */

	/* P1 Section Mappings */
	{ 0x80000000, 0x00000000, PMB_SZ_64M  | PMB_C, },
	{ 0x84000000, 0x04000000, PMB_SZ_64M  | PMB_C, },
	{ 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C, },
	{ 0x90000000, 0x10000000, PMB_SZ_64M  | PMB_C, },
	{ 0x94000000, 0x14000000, PMB_SZ_64M  | PMB_C, },
	{ 0x98000000, 0x18000000, PMB_SZ_64M  | PMB_C, },

	/* P2 Section Mappings */
	{ 0xa0000000, 0x00000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xa4000000, 0x04000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xa8000000, 0x08000000, PMB_UB | PMB_SZ_128M | PMB_WT, },
	{ 0xb0000000, 0x10000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xb4000000, 0x14000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xb8000000, 0x18000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
};

static inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

static DEFINE_SPINLOCK(pmb_list_lock);
static struct pmb_entry *pmb_list;

static inline void pmb_list_add(struct pmb_entry *pmbe)
{
	struct pmb_entry **p, *tmp;

	p = &pmb_list;
	while ((tmp = *p) != NULL)
		p = &tmp->next;

	pmbe->next = tmp;
	*p = pmbe;
}

static inline void pmb_list_del(struct pmb_entry *pmbe)
{
	struct pmb_entry **p, *tmp;

	for (p = &pmb_list; (tmp = *p); p = &tmp->next)
		if (tmp == pmbe) {
			*p = tmp->next;
			return;
		}
}

struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
			    unsigned long flags)
{
	struct pmb_entry *pmbe;

	pmbe = kmem_cache_alloc(pmb_cache, GFP_KERNEL);
	if (!pmbe)
		return ERR_PTR(-ENOMEM);

	pmbe->vpn   = vpn;
	pmbe->ppn   = ppn;
	pmbe->flags = flags;

	spin_lock_irq(&pmb_list_lock);
	pmb_list_add(pmbe);
	spin_unlock_irq(&pmb_list_lock);

	return pmbe;
}

void pmb_free(struct pmb_entry *pmbe)
{
	spin_lock_irq(&pmb_list_lock);
	pmb_list_del(pmbe);
	spin_unlock_irq(&pmb_list_lock);

	kmem_cache_free(pmb_cache, pmbe);
}

/*
 * Must be in P2 for __set_pmb_entry()
 */
int __set_pmb_entry(unsigned long vpn, unsigned long ppn,
		    unsigned long flags, int *entry)
{
	unsigned int pos = *entry;

	if (unlikely(pos == PMB_NO_ENTRY))
		pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);

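	/*
	 * Claim the slot in the allocation bitmap; if another mapping
	 * raced us to it, rescan for the next free entry and retry.
	 * Running off the end of the bitmap means the PMB is full.
	 */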
repeat:
	if (unlikely(pos >= NR_PMB_ENTRIES))
		return -ENOSPC;

	if (test_and_set_bit(pos, &pmb_map)) {
		pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);
		goto repeat;
	}

	ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));

#ifdef CONFIG_SH_WRITETHROUGH
	/*
	 * When we are in 32-bit address extended mode, CCR.CB becomes
	 * invalid, so care must be taken to manually adjust cacheable
	 * translations.
	 */
	if (likely(flags & PMB_C))
		flags |= PMB_WT;
#endif

	ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos));

	*entry = pos;

	return 0;
}

int set_pmb_entry(struct pmb_entry *pmbe)
{
	int ret;

	jump_to_P2();
	ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
	back_to_P1();

	return ret;
}

void clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned int entry = pmbe->entry;
	unsigned long addr;

	/*
	 * Don't allow clearing of wired init entries; P1 or P2 access
	 * without a corresponding mapping in the PMB will lead to a
	 * reset by the TLB.
	 */
	if (unlikely(entry < ARRAY_SIZE(pmb_init_map) ||
		     entry >= NR_PMB_ENTRIES))
		return;

	jump_to_P2();

	/* Clear V-bit */
	addr = mk_pmb_addr(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	addr = mk_pmb_data(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	back_to_P1();

	clear_bit(entry, &pmb_map);
}

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = 0x20000000, .flag = PMB_SZ_512M, },
	{ .size = 0x08000000, .flag = PMB_SZ_128M, },
	{ .size = 0x04000000, .flag = PMB_SZ_64M,  },
	{ .size = 0x01000000, .flag = PMB_SZ_16M,  },
};

/*
 * Map a physically contiguous range at vaddr, greedily using the largest
 * PMB page sizes that still fit. Returns the number of bytes mapped.
 */
long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, unsigned long flags)
{
	struct pmb_entry *pmbp;
	unsigned long wanted;
	int pmb_flags, i;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else
		pmb_flags = PMB_WT | PMB_UB;

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		struct pmb_entry *pmbe;
		int ret;

		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag);
		if (IS_ERR(pmbe))
			return PTR_ERR(pmbe);

		ret = set_pmb_entry(pmbe);
		if (ret != 0) {
			pmb_free(pmbe);
			return -EBUSY;
		}

		phys  += pmb_sizes[i].size;
		vaddr += pmb_sizes[i].size;
		size  -= pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;
	}

	if (size >= 0x1000000)
		goto again;

	return wanted - size;
}

/*
 * Tear down the chain of PMB entries backing the mapping that starts
 * at the given virtual address.
 */
void pmb_unmap(unsigned long addr)
{
	struct pmb_entry **p, *pmbe;

	for (p = &pmb_list; (pmbe = *p); p = &pmbe->next)
		if (pmbe->vpn == addr)
			break;

	if (unlikely(!pmbe))
		return;

	WARN_ON(!test_bit(pmbe->entry, &pmb_map));

	do {
		struct pmb_entry *pmblink = pmbe;

		clear_pmb_entry(pmbe);
		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe);
}

static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep,
			   unsigned long flags)
{
	struct pmb_entry *pmbe = pmb;

	memset(pmb, 0, sizeof(struct pmb_entry));

	pmbe->entry = PMB_NO_ENTRY;
}

static int __init pmb_init(void)
{
	unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
	unsigned int entry;

	BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));

	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
				      SLAB_PANIC, pmb_cache_ctor, NULL);

	jump_to_P2();

	/*
	 * Ordering is important; P2 must be mapped in the PMB before we
	 * can set PMB.SE, and P1 must be mapped before we jump back to
	 * P1 space.
	 */
	for (entry = 0; entry < nr_entries; entry++) {
		struct pmb_entry *pmbe = pmb_init_map + entry;

		__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &entry);
	}

	ctrl_outl(0, PMB_IRMCR);

	/* PMB.SE and UB[7] */
	ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR);

	back_to_P1();

	return 0;
}
arch_initcall(pmb_init);

static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = ctrl_inl(mk_pmb_addr(i));
		data = ctrl_inl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB" :
			 (size == PMB_SZ_64M)  ? " 64MB" :
			 (size == PMB_SZ_128M) ? "128MB" :
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     NULL, NULL, &pmb_debugfs_fops);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);