/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005, 2006, 2007 Paul Mundt
 *
 * P1/P2 Section mapping definitions from map32.h, which was:
 *
 *	Copyright 2003 (c) Lineo Solutions,Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>

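/*
 * The PMB provides 16 mapping slots.  The boot-time P1/P2 mappings in
 * pmb_init_map below are "wired" and never torn down; the remaining
 * slots are handed out dynamically, with the pmb_map bitmap tracking
 * which slots are in use.
 */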
#define NR_PMB_ENTRIES	16

static struct kmem_cache *pmb_cache;
static unsigned long pmb_map;

static struct pmb_entry pmb_init_map[] = {
	/* vpn         ppn         flags (ub/sz/c/wt) */

	/* P1 Section Mappings */
	{ 0x80000000, 0x00000000, PMB_SZ_64M  | PMB_C, },
	{ 0x84000000, 0x04000000, PMB_SZ_64M  | PMB_C, },
	{ 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C, },
	{ 0x90000000, 0x10000000, PMB_SZ_64M  | PMB_C, },
	{ 0x94000000, 0x14000000, PMB_SZ_64M  | PMB_C, },
	{ 0x98000000, 0x18000000, PMB_SZ_64M  | PMB_C, },

	/* P2 Section Mappings */
	{ 0xa0000000, 0x00000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xa4000000, 0x04000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xa8000000, 0x08000000, PMB_UB | PMB_SZ_128M | PMB_WT, },
	{ 0xb0000000, 0x10000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xb4000000, 0x14000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xb8000000, 0x18000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
};

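/*
 * Each PMB entry is programmed through a pair of memory-mapped array
 * registers; these helpers compute the address and data array slots
 * for a given entry number.
 */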
static inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

static DEFINE_SPINLOCK(pmb_list_lock);
static struct pmb_entry *pmb_list;

static inline void pmb_list_add(struct pmb_entry *pmbe)
{
	struct pmb_entry **p, *tmp;

	p = &pmb_list;
	while ((tmp = *p) != NULL)
		p = &tmp->next;

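	/* tmp is NULL here, so this terminates the list at the tail */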
	pmbe->next = tmp;
	*p = pmbe;
}

static inline void pmb_list_del(struct pmb_entry *pmbe)
{
	struct pmb_entry **p, *tmp;

	for (p = &pmb_list; (tmp = *p); p = &tmp->next)
		if (tmp == pmbe) {
			*p = tmp->next;
			return;
		}
}

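/*
 * Allocate a pmb_entry from the slab cache and add it to the global
 * list.  Returns an ERR_PTR() on allocation failure; the hardware slot
 * itself isn't claimed until set_pmb_entry() is called.
 */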
struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
			    unsigned long flags)
{
	struct pmb_entry *pmbe;

	pmbe = kmem_cache_alloc(pmb_cache, GFP_KERNEL);
	if (!pmbe)
		return ERR_PTR(-ENOMEM);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;

	spin_lock_irq(&pmb_list_lock);
	pmb_list_add(pmbe);
	spin_unlock_irq(&pmb_list_lock);

	return pmbe;
}

void pmb_free(struct pmb_entry *pmbe)
{
	spin_lock_irq(&pmb_list_lock);
	pmb_list_del(pmbe);
	spin_unlock_irq(&pmb_list_lock);

	kmem_cache_free(pmb_cache, pmbe);
}

/*
 * Must be in P2 for __set_pmb_entry()
 */
int __set_pmb_entry(unsigned long vpn, unsigned long ppn,
		    unsigned long flags, int *entry)
{
	unsigned int pos = *entry;

	if (unlikely(pos == PMB_NO_ENTRY))
		pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);

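	/*
	 * Claim the slot atomically; if someone raced us to it, rescan
	 * for the next free slot.  find_first_zero_bit() returns
	 * NR_PMB_ENTRIES when no slot is free.
	 */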
repeat:
	if (unlikely(pos >= NR_PMB_ENTRIES))
		return -ENOSPC;

	if (test_and_set_bit(pos, &pmb_map)) {
		pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);
		goto repeat;
	}

	ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));

#ifdef CONFIG_CACHE_WRITETHROUGH
	/*
	 * When we are in 32-bit address extended mode, CCR.CB becomes
	 * invalid, so care must be taken to manually adjust cacheable
	 * translations.
	 */
	if (likely(flags & PMB_C))
		flags |= PMB_WT;
#endif

	ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos));

	*entry = pos;

	return 0;
}

int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
{
	int ret;

	jump_to_uncached();
	ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
	back_to_cached();

	return ret;
}

void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned int entry = pmbe->entry;
	unsigned long addr;

	/*
	 * Don't allow clearing of wired init entries; P1 or P2 access
	 * without a corresponding PMB mapping will lead to a reset by
	 * the TLB.
	 */
	if (unlikely(entry < ARRAY_SIZE(pmb_init_map) ||
		     entry >= NR_PMB_ENTRIES))
		return;

	jump_to_uncached();

	/* Clear V-bit */
	addr = mk_pmb_addr(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	addr = mk_pmb_data(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	back_to_cached();

	clear_bit(entry, &pmb_map);
}

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size	= 0x20000000, .flag = PMB_SZ_512M, },
	{ .size = 0x08000000, .flag = PMB_SZ_128M, },
	{ .size = 0x04000000, .flag = PMB_SZ_64M,  },
	{ .size = 0x01000000, .flag = PMB_SZ_16M,  },
};

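/*
 * Map [phys, phys + size) at vaddr, greedily covering the range with
 * the largest PMB sizes that still fit.  Returns the number of bytes
 * actually mapped, which may be less than 'size' if the remainder is
 * smaller than the minimum 16MB granularity.
 *
 * Illustrative example (addresses are hypothetical):
 *
 *	long mapped = pmb_remap(0xb0000000UL, 0x40000000UL,
 *				0x05000000UL, _PAGE_CACHABLE);
 *
 * would cover the 80MB request with one 64MB and one 16MB entry and
 * return 0x05000000.
 */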
long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, unsigned long flags)
{
	struct pmb_entry *pmbp;
	unsigned long wanted;
	int pmb_flags, i;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else
		pmb_flags = PMB_WT | PMB_UB;

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		struct pmb_entry *pmbe;
		int ret;

		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag);
		if (IS_ERR(pmbe))
			return PTR_ERR(pmbe);

		ret = set_pmb_entry(pmbe);
		if (ret != 0) {
			pmb_free(pmbe);
			return -EBUSY;
		}

		phys	+= pmb_sizes[i].size;
		vaddr	+= pmb_sizes[i].size;
		size	-= pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;
	}

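	/* Retry while at least the smallest PMB size (16MB) remains. */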
	if (size >= 0x1000000)
		goto again;

	return wanted - size;
}

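/*
 * Tear down the mapping chain that was established by pmb_remap() for
 * the given virtual address, following the ->link pointers so every
 * entry spanning the original range is cleared and freed.
 */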
void pmb_unmap(unsigned long addr)
{
	struct pmb_entry **p, *pmbe;

	for (p = &pmb_list; (pmbe = *p); p = &pmbe->next)
		if (pmbe->vpn == addr)
			break;

	if (unlikely(!pmbe))
		return;

	WARN_ON(!test_bit(pmbe->entry, &pmb_map));

	do {
		struct pmb_entry *pmblink = pmbe;

		clear_pmb_entry(pmbe);
		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe);
}

static void pmb_cache_ctor(void *pmb)
{
	struct pmb_entry *pmbe = pmb;

	memset(pmb, 0, sizeof(struct pmb_entry));

	pmbe->entry = PMB_NO_ENTRY;
}

static int __uses_jump_to_uncached pmb_init(void)
{
	unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
	unsigned int entry, i;

	BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));

	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
				      SLAB_PANIC, pmb_cache_ctor);

	jump_to_uncached();

	/*
	 * Ordering is important, P2 must be mapped in the PMB before we
	 * can set PMB.SE, and P1 must be mapped before we jump back to
	 * P1 space.
	 */
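	/*
	 * Note that __set_pmb_entry() writes the slot it claimed back
	 * through &entry, which keeps the loop counter in step with the
	 * wired entry indices.
	 */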
	for (entry = 0; entry < nr_entries; entry++) {
		struct pmb_entry *pmbe = pmb_init_map + entry;

		__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &entry);
	}

	ctrl_outl(0, PMB_IRMCR);

	/* PMB.SE and UB[7] */
	ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR);

	/* Flush out the TLB */
	i = ctrl_inl(MMUCR);
	i |= MMUCR_TI;
	ctrl_outl(i, MMUCR);

	back_to_cached();

	return 0;
}
arch_initcall(pmb_init);

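/*
 * Dump the current PMB state through debugfs, one line per hardware
 * entry, e.g.:
 *
 *	02: V 0x88 0x08 128MB C CB  B
 */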
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = ctrl_inl(mk_pmb_addr(i));
		data = ctrl_inl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);

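/*
 * The PMB is lost across hibernation, so reprogram every entry on the
 * global list when transitioning from PM_EVENT_FREEZE back to
 * PM_EVENT_ON.
 */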
#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;
		spin_lock_irq(&pmb_list_lock);
		for (pmbe = pmb_list; pmbe; pmbe = pmbe->next)
			set_pmb_entry(pmbe);
		spin_unlock_irq(&pmb_list_lock);
	}
	prev_state = state;
	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}

subsys_initcall(pmb_sysdev_init);
#endif