/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>

#define NR_PMB_ENTRIES	16

static void __pmb_unmap(struct pmb_entry *);

static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static unsigned long pmb_map;

static inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}
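
/*
 * Worked example (illustrative; the real values come from <asm/mmu.h>):
 * assuming PMB_ADDR = 0xf6100000, PMB_DATA = 0xf7100000 and
 * PMB_E_SHIFT = 8, entry 2 is accessed through:
 *
 *	mk_pmb_addr(2) == 0xf6100000 | (2 << 8) == 0xf6100200
 *	mk_pmb_data(2) == 0xf7100000 | (2 << 8) == 0xf7100200
 */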

static int pmb_alloc_entry(void)
{
	unsigned int pos;

repeat:
	pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);

	/* find_first_zero_bit() returns NR_PMB_ENTRIES when the map is full */
	if (unlikely(pos >= NR_PMB_ENTRIES))
		return -ENOSPC;

	/*
	 * Retry if a concurrent allocator grabbed the same slot between
	 * the search and the set.
	 */
	if (test_and_set_bit(pos, &pmb_map))
		goto repeat;

	return pos;
}

static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	int pos;

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (pos < 0)
			return ERR_PTR(pos);
	} else {
		/*
		 * Take ownership of the requested slot, failing if it
		 * is already in use.
		 */
		if (test_and_set_bit(entry, &pmb_map))
			return ERR_PTR(-ENOSPC);
		pos = entry;
	}

	pmbe = &pmb_entry_list[pos];

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;

	return pmbe;
}
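
/*
 * Illustrative only: callers either take any free slot, as pmb_remap()
 * does by passing PMB_NO_ENTRY, or pin a specific one, as pmb_init()
 * does when resyncing software state with the hardware:
 *
 *	pmbe = pmb_alloc(vpn, ppn, flags, PMB_NO_ENTRY);  - any free slot
 *	pmbe = pmb_alloc(vpn, ppn, flags, 5);             - entry 5 exactly
 */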

static void pmb_free(struct pmb_entry *pmbe)
{
	int pos = pmbe->entry;

	pmbe->vpn	= 0;
	pmbe->ppn	= 0;
	pmbe->flags	= 0;
	pmbe->entry	= 0;

	clear_bit(pos, &pmb_map);
}
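
/*
 * Note that pmb_free() only tears down the software state; the hardware
 * entry is invalidated separately via clear_pmb_entry(), which is the
 * order __pmb_unmap() below uses.
 */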

/*
 * Must be in P2 for __set_pmb_entry()
 */
static void __set_pmb_entry(unsigned long vpn, unsigned long ppn,
			    unsigned long flags, int pos)
{
	__raw_writel(vpn | PMB_V, mk_pmb_addr(pos));

#ifdef CONFIG_CACHE_WRITETHROUGH
	/*
	 * When we are in 32-bit address extended mode, CCR.CB becomes
	 * invalid, so care must be taken to manually adjust cacheable
	 * translations.
	 */
	if (likely(flags & PMB_C))
		flags |= PMB_WT;
#endif

	__raw_writel(ppn | flags | PMB_V, mk_pmb_data(pos));
}

static void set_pmb_entry(struct pmb_entry *pmbe)
{
	jump_to_uncached();
	__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry);
	back_to_cached();
}
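
/*
 * The PMB array may only be poked while executing out of the uncached,
 * identity-mapped P2 segment, which is why every hardware update here is
 * bracketed by jump_to_uncached()/back_to_cached().
 */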

static void clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned int entry = pmbe->entry;
	unsigned long addr;

	if (unlikely(entry >= NR_PMB_ENTRIES))
		return;

	jump_to_uncached();

	/* Clear V-bit */
	addr = mk_pmb_addr(entry);
	__raw_writel(__raw_readl(addr) & ~PMB_V, addr);

	addr = mk_pmb_data(entry);
	__raw_writel(__raw_readl(addr) & ~PMB_V, addr);

	back_to_cached();
}

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = 0x20000000, .flag = PMB_SZ_512M, },
	{ .size = 0x08000000, .flag = PMB_SZ_128M, },
	{ .size = 0x04000000, .flag = PMB_SZ_64M,  },
	{ .size = 0x01000000, .flag = PMB_SZ_16M,  },
};
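
/*
 * Worked example: pmb_remap() below walks this table greedily, so a 192MB
 * request decomposes into one 128MB entry followed by one 64MB entry. Any
 * remainder smaller than the 16MB minimum mapping size is left unmapped,
 * which is why the function returns the size it actually mapped.
 */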

long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, unsigned long flags)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long wanted;
	int pmb_flags, i;
	long err;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else {
		pmb_flags = PMB_WT | PMB_UB;
	}

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);
		if (IS_ERR(pmbe)) {
			err = PTR_ERR(pmbe);
			goto out;
		}

		set_pmb_entry(pmbe);

		phys	+= pmb_sizes[i].size;
		vaddr	+= pmb_sizes[i].size;
		size	-= pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;
	}

	/* Retry if a tail of at least the 16MB minimum remains unmapped */
	if (size >= 0x1000000)
		goto again;

	return wanted - size;

out:
	if (pmbp)
		__pmb_unmap(pmbp);

	return err;
}
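
/*
 * Illustrative usage (hypothetical addresses, not part of this file): a
 * board setup path might bolt a 64MB device window into place with:
 *
 *	long mapped;
 *
 *	mapped = pmb_remap(0xb0000000, 0x18000000, 0x04000000,
 *			   _PAGE_CACHABLE);
 *	if (mapped < 0)
 *		return mapped;
 *
 * and later tear it down again with pmb_unmap(0xb0000000). Note that the
 * return value is the number of bytes actually mapped, which may be less
 * than requested if the tail falls below the 16MB minimum.
 */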

void pmb_unmap(unsigned long addr)
{
	struct pmb_entry *pmbe = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, &pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == addr)
				break;
			/* No match, don't leave a stale pointer behind */
			pmbe = NULL;
		}
	}

	if (unlikely(!pmbe))
		return;

	__pmb_unmap(pmbe);
}

static void __pmb_unmap(struct pmb_entry *pmbe)
{
	BUG_ON(!test_bit(pmbe->entry, &pmb_map));

	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		clear_pmb_entry(pmbe);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe);
}
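
/*
 * Tear-down follows the ->link chain built by pmb_remap(), so the 192MB
 * example above (128MB + 64MB) is undone by clearing and freeing both
 * entries from a single pmb_unmap() call on the base address.
 */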

#ifdef CONFIG_PMB_LEGACY
static inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __MEMORY_START && ppn < __MEMORY_START + __MEMORY_SIZE;
}

static int pmb_apply_legacy_mappings(void)
{
	unsigned int applied = 0;
	int i;

	pr_info("PMB: Preserving legacy mappings:\n");

	/*
	 * The following entries are set up by the bootloader.
	 *
	 * Entry       VPN	   PPN	    V	SZ	C	UB
	 * --------------------------------------------------------
	 *   0      0xA0000000 0x00000000   1   64MB    0       0
	 *   1      0xA4000000 0x04000000   1   16MB    0       0
	 *   2      0xA6000000 0x08000000   1   16MB    0       0
	 *   9      0x88000000 0x48000000   1  128MB    1       1
	 *  10      0x90000000 0x50000000   1  128MB    1       1
	 *  11      0x98000000 0x58000000   1  128MB    1       1
	 *  13      0xA8000000 0x48000000   1  128MB    0       0
	 *  14      0xB0000000 0x50000000   1  128MB    0       0
	 *  15      0xB8000000 0x58000000   1  128MB    0       0
	 *
	 * The only entries that we need are the ones that map the kernel
	 * at the cached and uncached addresses.
	 */
	for (i = 0; i < PMB_ENTRY_MAX; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (pmb_ppn_in_range(ppn)) {
			unsigned int size;
			char *sz_str = NULL;

			size = data_val & PMB_SZ_MASK;

			sz_str = (size == PMB_SZ_16M)  ? " 16MB":
				 (size == PMB_SZ_64M)  ? " 64MB":
				 (size == PMB_SZ_128M) ? "128MB":
							 "512MB";

			pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n",
				vpn, ppn, sz_str,
				(data_val & PMB_C) ? "" : "un");

			applied++;
		} else {
			/*
			 * Invalidate anything out of bounds.
			 */
			__raw_writel(addr_val & ~PMB_V, addr);
			__raw_writel(data_val & ~PMB_V, data);
		}
	}

	return (applied == 0);
}
#else
static inline int pmb_apply_legacy_mappings(void)
{
	return 1;
}
#endif

int pmb_init(void)
{
	int i, ret;
	unsigned long addr, data;

	jump_to_uncached();

	/*
	 * Attempt to apply the legacy boot mappings if configured. If
	 * this is successful then we simply carry on with those and
	 * don't bother establishing additional memory mappings. Dynamic
	 * device mappings through pmb_remap() can still be bolted on
	 * after this.
	 */
	ret = pmb_apply_legacy_mappings();
	if (ret == 0) {
		back_to_cached();
		return 0;
	}

	/*
	 * Sync our software copy of the PMB mappings with those in
	 * hardware. The mappings in the hardware PMB were either set up
	 * by the bootloader or very early on by the kernel.
	 */
	for (i = 0; i < PMB_ENTRY_MAX; i++) {
		struct pmb_entry *pmbe;
		unsigned long vpn, ppn, flags;

		addr = mk_pmb_data(i);
		data = __raw_readl(addr);
		if (!(data & PMB_V))
			continue;

		if (data & PMB_C) {
#if defined(CONFIG_CACHE_WRITETHROUGH)
			data |= PMB_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
			data &= ~PMB_WT;
#else
			data &= ~(PMB_C | PMB_WT);
#endif
		}
		__raw_writel(data, addr);

		ppn = data & PMB_PFN_MASK;

		flags = data & (PMB_C | PMB_WT | PMB_UB);
		flags |= data & PMB_SZ_MASK;

		addr = mk_pmb_addr(i);
		data = __raw_readl(addr);

		vpn = data & PMB_PFN_MASK;

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		WARN_ON(IS_ERR(pmbe));
	}

	__raw_writel(0, PMB_IRMCR);

	/* Flush out the TLB */
	i = __raw_readl(MMUCR);
	i |= MMUCR_TI;
	__raw_writel(i, MMUCR);

	back_to_cached();

	return 0;
}

/*
 * PASCR.SE is set once the CPU is running with 32-bit address extension
 * enabled; if it reads back clear we are still in legacy 29-bit mode.
 */
bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}
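
/*
 * Example output (hypothetical entries), assuming debugfs is mounted in
 * the usual place so the file appears as /sys/kernel/debug/sh/pmb:
 *
 *	V: Valid, C: Cacheable, WT: Write-Through
 *	CB: Copy-Back, B: Buffered, UB: Unbuffered
 *	ety   vpn  ppn  size   flags
 *	00: V 0x80 0x00 128MB C CB  B
 *	01: V 0x88 0x08 128MB C CB  B
 */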

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	/*
	 * debugfs_create_file() returns NULL on failure and an ERR_PTR()
	 * when debugfs itself is disabled, so check for both.
	 */
	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;
		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, &pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}
	}
	prev_state = state;
	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif