/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 */

#ifndef _ASM_MICROBLAZE_PGTABLE_H
#define _ASM_MICROBLAZE_PGTABLE_H

#include <asm/setup.h>

#ifndef __ASSEMBLY__
extern int mem_init_done;
#endif

#include <asm-generic/pgtable-nopmd.h>

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);

/* Start and end of the vmalloc area. */
/* Make sure to map the vmalloc area above the pinned kernel memory area
   of 32 MB. */
#define VMALLOC_START	(CONFIG_KERNEL_START + CONFIG_LOWMEM_SIZE)
#define VMALLOC_END	ioremap_bot

#endif /* __ASSEMBLY__ */

/*
 * Macros to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot) \
	(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
			_PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) \
	(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
			_PAGE_NO_CACHE))
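
/*
 * Illustrative sketch of the difference between the two helpers above
 * (the protection names used are the ones defined later in this file):
 * pgprot_noncached() yields a cache-inhibited *and* guarded mapping,
 * e.g. for device registers, while pgprot_noncached_wc() only inhibits
 * caching:
 *
 *	pgprot_t io_prot = pgprot_noncached(PAGE_KERNEL);    // I + G set
 *	pgprot_t wc_prot = pgprot_noncached_wc(PAGE_KERNEL); // only I set
 */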

/*
 * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash
 * table containing PTEs, together with a set of 16 segment registers, to
 * define the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings.  We maintain a two-level page table tree, much
 * like that used by the i386, for the sake of the Linux memory
 * management code.  Low-level assembler code in hashtable.S
 * (procedure hash_page) is responsible for extracting ptes from the
 * tree and putting them into the hash table when necessary, and
 * updating the accessed and modified bits in the page table tree.
 */

/*
 * The MicroBlaze processor has a TLB architecture identical to PPC-40x. The
 * instruction and data sides share a unified, 64-entry, semi-associative
 * TLB which is maintained totally under software control. In addition, the
 * instruction side has a hardware-managed, 2-, 4- or 8-entry, fully-associative
 * TLB which serves as a first level to the shared TLB. These two TLBs are
 * known as the UTLB and ITLB, respectively (see "mmu.h" for definitions).
 */

/*
 * The normal case is that PTEs are 32 bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 */

/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
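
/*
 * Worked example (illustrative only, assuming the usual 4k pages, i.e.
 * PAGE_SHIFT == 12 and PTE_SHIFT == 10): PGDIR_SHIFT is then 22, so
 * PGDIR_SIZE is 4 MB, PTRS_PER_PTE == 1024 and PTRS_PER_PGD == 1024.
 * A 32-bit virtual address thus decomposes into a 10-bit pgdir index,
 * a 10-bit PTE index and a 12-bit offset within the page:
 *
 *	pgd_index = addr >> 22;
 *	pte_index = (addr >> 12) & 0x3ff;
 *	offset    = addr & 0xfff;
 */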

#define pte_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \
		__FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
		__FILE__, __LINE__, pgd_val(e))

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PTE as closely as possible.
 */

/* There are several potential gotchas here.  The hardware TLBLO
 * field looks like this:
 *
 *   0  1  2  3  4  ... 18  19  20  21  22  23  24  25  26  27  28  29  30  31
 *   RPN.....................  0  0 EX WR ZSEL.......  W  I  M  G
 *
 * Where possible we make the Linux PTE bits match up with this
 *
 * - bits 20 and 21 must be cleared, because we use 4k pages (4xx can
 *   support down to 1k pages), this is done in the TLBMiss exception
 *   handler.
 * - We use only zones 0 (for kernel pages) and 1 (for user pages)
 *   of the 16 available.  Bits 24-26 of the TLB are cleared in the TLB
 *   miss handler.  Bit 27 is PAGE_USER, thus selecting the correct
 *   zone.
 * - PRESENT *must* be in the bottom two bits because swap PTEs use the top
 *   30 bits.  Because 4xx doesn't support SMP anyway, M is irrelevant so we
 *   borrow it for PAGE_PRESENT.  Bit 30 is cleared in the TLB miss handler
 *   before the TLB entry is loaded.
 * - All other bits of the PTE are loaded into TLBLO without
 *   modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
 *   software PTE bits.  We actually use bits 21, 24, 25, and
 *   30 respectively for the software bits: ACCESSED, DIRTY, RW, and
 *   PRESENT.
 */

/* Definitions for MicroBlaze. */
#define	_PAGE_GUARDED	0x001	/* G: page is guarded from prefetch */
#define	_PAGE_PRESENT	0x002	/* software: PTE contains a translation */
#define	_PAGE_NO_CACHE	0x004	/* I: caching is inhibited */
#define	_PAGE_WRITETHRU	0x008	/* W: caching is write-through */
#define	_PAGE_USER	0x010	/* matches one of the zone permission bits */
#define	_PAGE_RW	0x040	/* software: Writes permitted */
#define	_PAGE_DIRTY	0x080	/* software: dirty page */
#define	_PAGE_HWWRITE	0x100	/* hardware: Dirty & RW, set in exception */
#define	_PAGE_HWEXEC	0x200	/* hardware: EX permission */
#define	_PAGE_ACCESSED	0x400	/* software: R: page referenced */
#define _PMD_PRESENT	PAGE_MASK

/* We borrow bit 24 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	_PAGE_DIRTY

/*
 * Some bits are unused...
 */
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE	0
#endif
#ifndef _PTE_NONE_MASK
#define _PTE_NONE_MASK	0
#endif
#ifndef _PAGE_SHARED
#define _PAGE_SHARED	0
#endif
#ifndef _PAGE_EXEC
#define _PAGE_EXEC	0
#endif

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

/*
 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
 * to have it in the Linux PTE, and in fact the bit could be reused for
 * another purpose.  -- paulus.
 */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)

#define _PAGE_KERNEL \
	(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC)

#define _PAGE_IO	(_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)

#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X \
		__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_SHARED)
#define PAGE_KERNEL_CI	__pgprot(_PAGE_IO)
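
/*
 * For orientation (a sketch, not a definition used anywhere): with the
 * bit values and the zero _PAGE_SHARED/_PAGE_EXEC fallbacks above,
 * _PAGE_KERNEL works out to 0x7c2 (PRESENT | ACCESSED | RW | DIRTY |
 * HWWRITE | HWEXEC), and PAGE_KERNEL_CI additionally sets NO_CACHE and
 * GUARDED, i.e. 0x7c7.
 */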

/*
 * We consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 */

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* __ASSEMBLY__ */

#define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) \
	do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		((pmd_val(pmd) & _PMD_PRESENT) == 0)
#define pmd_present(pmd)	((pmd_val(pmd) & _PMD_PRESENT) != 0)
#define pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

#define pte_page(x)	(mem_map + (unsigned long) \
				((pte_val(x) - memory_start) >> PAGE_SHIFT))
#define PFN_PTE_SHIFT		PAGE_SHIFT

#define pte_pfn(x)		(pte_val(x) >> PFN_PTE_SHIFT)

#define pfn_pte(pfn, prot) \
	__pte(((pte_basic_t)(pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
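
/*
 * Illustrative round trip (a sketch, relying only on the macros above):
 * since all protection bits live below PFN_PTE_SHIFT, pfn_pte() and
 * pte_pfn() are inverses for the frame number, e.g.
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *	BUG_ON(pte_pfn(pte) != pfn);
 */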

#ifndef __ASSEMBLY__
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }

static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }

static inline pte_t pte_rdprotect(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)
	{ pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_exprotect(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_mkclean(pte_t pte)
	{ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkread(pte_t pte)
	{ pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte)
	{ pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite_novma(pte_t pte)
	{ pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)
	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)
	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = physpage | pgprot_val(pgprot);
	return pte;
}

#define mk_pte(page, pgprot) \
({									\
	pte_t pte;							\
	pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) | \
			pgprot_val(pgprot);				\
	pte;								\
})

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
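
/*
 * For example (illustrative only), downgrading a writable mapping to
 * read-only while keeping its frame number and accessed/dirty state:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 *
 * _PAGE_CHG_MASK preserves the physical page plus _PAGE_ACCESSED and
 * _PAGE_DIRTY; every other bit is taken from the new protection.
 */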

/*
 * Atomic PTE updates.
 *
 * pte_update clears and sets bits atomically, and returns
 * the old pte value.
 * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant
 * 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits.
 */
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
				unsigned long set)
{
	unsigned long flags, old, tmp;

	raw_local_irq_save(flags);

	__asm__ __volatile__(	"lw	%0, %2, r0	\n"
				"andn	%1, %0, %3	\n"
				"or	%1, %1, %4	\n"
				"sw	%1, %2, r0	\n"
			: "=&r" (old), "=&r" (tmp)
			: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set)
			: "cc");

	raw_local_irq_restore(flags);

	return old;
}
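
/*
 * Typical use (a sketch of what the helpers below do): atomically clear
 * the dirty bits and learn whether the page had been written to:
 *
 *	old = pte_update(ptep, _PAGE_DIRTY | _PAGE_HWWRITE, 0);
 *	was_dirty = (old & _PAGE_DIRTY) != 0;
 */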

/*
 * set_pte stores a linux PTE into the linux page table.
 */
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
		unsigned long address, pte_t *ptep)
{
	return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
}

static inline int ptep_test_and_clear_dirty(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	return (pte_update(ptep,
		(_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

/*static inline void ptep_set_wrprotect(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}*/

static inline void ptep_mkdirty(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	pte_update(ptep, 0, _PAGE_DIRTY);
}

/*#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)*/

/* Convert pmd entry to page */
/* our pmd entry is an effective address of the pte table */
/* returns effective address of the pmd entry */
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return ((unsigned long) (pmd_val(pmd) & PAGE_MASK));
}

/* returns pfn of the pmd entry */
#define pmd_pfn(pmd)	(__pa(pmd_val(pmd)) >> PAGE_SHIFT)

/* returns struct page * of the pmd entry */
#define pmd_page(pmd)	(pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))

/* Find an entry in the third-level page table. */

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 *                         1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 *   <------------------ offset -------------------> E < type -> 0 0
 *
 *   E is the exclusive marker that is not stored in swap entries.
 */
#define __swp_type(entry)	((entry).val & 0x1f)
#define __swp_offset(entry)	((entry).val >> 6)
#define __swp_entry(type, offset) \
		((swp_entry_t) { ((type) & 0x1f) | ((offset) << 6) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> 2 })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val << 2 })
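
/*
 * Worked example (illustrative): swap type 3 at offset 0x1234 packs as
 *
 *	swp_entry_t ent = __swp_entry(3, 0x1234);   // ent.val == 0x48d03
 *	pte_t pte = __swp_entry_to_pte(ent);        // pte_val == 0x12340c
 *
 * The two low PTE bits stay zero, so pte_present() is false, and the
 * _PAGE_SWP_EXCLUSIVE bit (0x080) is left free for the E marker below.
 */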

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}

extern unsigned long iopa(unsigned long addr);

/* Values for nocacheflag and cmode */
/* These are not used by the APUS kernel_map, but prevent
 * compilation errors.
 */
#define	IOMAP_FULL_CACHING	0
#define	IOMAP_NOCACHE_SER	1
#define	IOMAP_NOCACHE_NONSER	2
#define	IOMAP_NO_COPYBACK	3

void do_page_fault(struct pt_regs *regs, unsigned long address,
		   unsigned long error_code);

void mapin_ram(void);
int map_page(unsigned long va, phys_addr_t pa, int flags);

extern int mem_init_done;

asmlinkage void __init mmu_init(void);

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#ifndef __ASSEMBLY__
extern unsigned long ioremap_bot, ioremap_base;

void setup_memory(void);
#endif /* __ASSEMBLY__ */

#endif /* _ASM_MICROBLAZE_PGTABLE_H */