/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2011  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <linux/sizes.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

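/*
 * Software shadow of one hardware PMB slot. The vpn/ppn/flags/size
 * fields mirror what is (or will be) programmed into the PMB address
 * and data arrays, while 'entry' records which hardware slot is in use.
 */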
struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	raw_spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES - 1 for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};

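/*
 * Supported PMB page sizes, largest first, so that the mapping code
 * below always covers a region with the biggest entry that still fits.
 */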
static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size	= SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;

static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
	flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
	unsigned long pmb_flags = 0;
	u64 flags = pgprot_val(prot);

	if (flags & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;
	if (flags & _PAGE_WT)
		pmb_flags |= PMB_WT | PMB_UB;

	return pmb_flags;
}

static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}

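/*
 * Check whether the requested virtual/physical range is already fully
 * covered by an existing mapping, following the link chain for
 * compound (multi-entry) mappings.
 */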
static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
			       unsigned long size)
{
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe, *iter;
		unsigned long span;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * See if VPN and PPN are bounded by an existing mapping.
		 */
		if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
			continue;
		if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
			continue;

		/*
		 * Now see if we're in range of a simple mapping.
		 */
		if (size <= pmbe->size) {
			read_unlock(&pmb_rwlock);
			return true;
		}

		span = pmbe->size;

		/*
		 * Finally for sizes that involve compound mappings, walk
		 * the chain.
		 */
		for (iter = pmbe->link; iter; iter = iter->link)
			span += iter->size;

		/*
		 * Nothing else to do if the range requirements are met.
		 */
		if (size <= span) {
			read_unlock(&pmb_rwlock);
			return true;
		}
	}

	read_unlock(&pmb_rwlock);
	return false;
}

static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}

static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
	return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}

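/*
 * Claim the first free slot in the PMB bitmap. Returns the slot index
 * on success or -ENOSPC when the PMB is full. The caller is expected
 * to hold pmb_rwlock for writing.
 */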
static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}

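/*
 * Allocate and initialize the software state for a single PMB entry.
 * A specific hardware slot can be requested via 'entry'; passing
 * PMB_NO_ENTRY picks the first free one. Returns an ERR_PTR() value
 * if no slot is available.
 */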
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	raw_spin_lock_init(&pmbe->lock);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}

static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry	= PMB_NO_ENTRY;
	pmbe->link	= NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	jump_to_uncached();

	/* Set V-bit */
	__raw_writel(pmbe->vpn | PMB_V, addr);
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);

	back_to_cached();
}

static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}

#ifdef CONFIG_PM
static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	raw_spin_unlock_irqrestore(&pmbe->lock, flags);
}
#endif /* CONFIG_PM */

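/*
 * Establish a bolted mapping of at least 16MB, carving the region up
 * into as few PMB entries as possible and linking them together so
 * that the whole mapping can be torn down in one go later. As an
 * illustrative sketch, a 48MB request ends up as three 16MB entries
 * chained via pmbe->link.
 */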
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
		     unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long orig_addr, orig_size;
	unsigned long flags, pmb_flags;
	int i, mapped;

	if (size < SZ_16M)
		return -EINVAL;
	if (!pmb_addr_valid(vaddr, size))
		return -EFAULT;
	if (pmb_mapping_exists(vaddr, phys, size))
		return 0;

	orig_addr = vaddr;
	orig_size = size;

	flush_tlb_kernel_range(vaddr, vaddr + size);

	pmb_flags = pgprot_to_pmb_flags(prot);
	pmbp = NULL;

	do {
		for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
			if (size < pmb_sizes[i].size)
				continue;

			pmbe = pmb_alloc(vaddr, phys, pmb_flags |
					 pmb_sizes[i].flag, PMB_NO_ENTRY);
			if (IS_ERR(pmbe)) {
				pmb_unmap_entry(pmbp, mapped);
				return PTR_ERR(pmbe);
			}

			raw_spin_lock_irqsave(&pmbe->lock, flags);

			pmbe->size = pmb_sizes[i].size;

			__set_pmb_entry(pmbe);

			phys	+= pmbe->size;
			vaddr	+= pmbe->size;
			size	-= pmbe->size;

			/*
			 * Link adjacent entries that span multiple PMB
			 * entries for easier tear-down.
			 */
			if (likely(pmbp)) {
				raw_spin_lock_nested(&pmbp->lock,
						     SINGLE_DEPTH_NESTING);
				pmbp->link = pmbe;
				raw_spin_unlock(&pmbp->lock);
			}

			pmbp = pmbe;

			/*
			 * Instead of trying smaller sizes on every
			 * iteration (even if we succeed in allocating
			 * space), try using pmb_sizes[i].size again.
			 */
			i--;
			mapped++;

			raw_spin_unlock_irqrestore(&pmbe->lock, flags);
		}
	} while (size >= SZ_16M);

	flush_cache_vmap(orig_addr, orig_addr + orig_size);

	return 0;
}

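/*
 * Remap a physical region through the PMB rather than the TLB. This
 * path is only taken when "pmb=iomap" has been given on the command
 * line (see early_pmb() below); the virtual range is reserved with
 * __get_vm_area_caller() and then bolted with pmb_bolt_mapping().
 */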
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
			       pgprot_t prot, void *caller)
{
	unsigned long vaddr;
	phys_addr_t offset, last_addr;
	phys_addr_t align_mask;
	unsigned long aligned;
	struct vm_struct *area;
	int i, ret;

	if (!pmb_iomapping_enabled)
		return NULL;

	/*
	 * Small mappings need to go through the TLB.
	 */
	if (size < SZ_16M)
		return ERR_PTR(-EINVAL);
	if (!pmb_prot_valid(prot))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (size >= pmb_sizes[i].size)
			break;

	last_addr = phys + size;
	align_mask = ~(pmb_sizes[i].size - 1);
	offset = phys & ~align_mask;
	phys &= align_mask;
	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

	/*
	 * XXX: This should really start from uncached_end, but this
	 * causes the MMU to reset, so for now we restrict it to the
	 * 0xb000...0xc000 range.
	 */
	area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
				    P3SEG, caller);
	if (!area)
		return NULL;

	area->phys_addr = phys;
	vaddr = (unsigned long)area->addr;

	ret = pmb_bolt_mapping(vaddr, phys, size, prot);
	if (unlikely(ret != 0))
		return ERR_PTR(ret);

	return (void __iomem *)(offset + (char *)vaddr);
}

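/*
 * Tear down the mapping whose base virtual address matches 'addr'.
 * Returns 0 on success or -EINVAL if no PMB entry covers the address.
 */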
int pmb_unmap(void __iomem *addr)
{
	struct pmb_entry *pmbe = NULL;
	unsigned long vaddr = (unsigned long __force)addr;
	int i, found = 0;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == vaddr) {
				found = 1;
				break;
			}
		}
	}

	read_unlock(&pmb_rwlock);

	if (found) {
		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
		return 0;
	}

	return -EINVAL;
}

static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}

static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		raw_spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;
			raw_spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}

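/*
 * Fold a chain of contiguous entries into the head entry when their
 * combined span matches one of the supported PMB sizes, then free the
 * now-redundant tail entries.
 */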
static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!depth || !pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}

static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		raw_spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size = SZ_16M;
		pmbe->flags &= ~PMB_SZ_MASK;
		pmbe->flags |= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		raw_spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif

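/*
 * Command line handling: "pmb=iomap" allows large ioremap() requests
 * to be satisfied through the PMB (see pmb_remap_caller() above).
 */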
static int __init early_pmb(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "iomap"))
		pmb_iomapping_enabled = 1;

	return 0;
}
early_param("pmb", early_pmb);

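/*
 * Bring the software PMB state in line with whatever the boot loader
 * and early kernel code left in hardware, then tidy up the result.
 */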
void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	local_flush_tlb_all();
	ctrl_barrier();
}

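/*
 * PASCR.SE selects 32-bit address extended mode; when it reads back
 * as clear the CPU is still running with the legacy 29-bit physical
 * address space.
 */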
bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

static int pmb_debugfs_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(pmb_debugfs);

static int __init pmb_debugfs_init(void)
{
	debugfs_create_file("pmb", S_IFREG | S_IRUGO, arch_debugfs_dir, NULL,
			    &pmb_debugfs_fops);
	return 0;
}
subsys_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
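/*
 * The hardware PMB state may not survive a suspend/resume cycle, so
 * on resume reprogram every entry we are tracking in software back
 * into hardware.
 */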
static void pmb_syscore_resume(void)
{
	struct pmb_entry *pmbe;
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			set_pmb_entry(pmbe);
		}
	}

	read_unlock(&pmb_rwlock);
}

static struct syscore_ops pmb_syscore_ops = {
	.resume = pmb_syscore_resume,
};

static int __init pmb_sysdev_init(void)
{
	register_syscore_ops(&pmb_syscore_ops);
	return 0;
}
subsys_initcall(pmb_sysdev_init);
#endif