/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_32_PTE_40x_H
#define _ASM_POWERPC_NOHASH_32_PTE_40x_H
#ifdef __KERNEL__

/*
 * PTE bit definitions for the PowerPC 40x family.
 *
 * At present, all PowerPC 400-class processors share a similar TLB
 * architecture. The instruction and data sides share a unified,
 * 64-entry, fully-associative TLB which is maintained totally under
 * software control. In addition, the instruction side has a
 * hardware-managed, 4-entry, fully-associative TLB which serves as a
 * first level to the shared TLB. These two TLBs are known as the UTLB
 * and ITLB, respectively (see "mmu.h" for definitions).
 *
 * There are several potential gotchas here.  The 40x hardware TLBLO
 * field looks like this:
 *
 * 0  1  2  3  4  ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
 * RPN.....................  0  0 EX WR ZSEL.......  W  I  M  G
 *
 * Where possible we make the Linux PTE bits match up with this
 *
 * - bits 20 and 21 must be cleared, because we use 4k pages (40x can
 *   support down to 1k pages), this is done in the TLBMiss exception
 *   handler.
 * - We use only zones 0 (for kernel pages) and 1 (for user pages)
 *   of the 16 available.  Bit 24-26 of the TLB are cleared in the TLB
 *   miss handler.  Bit 27 is PAGE_USER, thus selecting the correct
 *   zone.
 * - PRESENT *must* be in the bottom two bits because swap PTEs
 *   use the top 30 bits.  Because 40x doesn't support SMP anyway, M is
 *   irrelevant so we borrow it for PAGE_PRESENT.  Bit 30
 *   is cleared in the TLB miss handler before the TLB entry is loaded.
 * - All other bits of the PTE are loaded into TLBLO without
 *   modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
 *   software PTE bits.  We actually use bits 21, 24, 25, and
 *   30 respectively for the software bits: ACCESSED, DIRTY, RW, and
 *   PRESENT.
 */

#define _PAGE_GUARDED	0x001	/* G: page is guarded from prefetch */
#define _PAGE_PRESENT	0x002	/* software: PTE contains a translation */
#define _PAGE_NO_CACHE	0x004	/* I: caching is inhibited */
#define _PAGE_WRITETHRU	0x008	/* W: caching is write-through */
#define _PAGE_USER	0x010	/* matches one of the zone permission bits */
#define _PAGE_SPECIAL	0x020	/* software: Special page */
#define _PAGE_DIRTY	0x080	/* software: dirty page */
#define _PAGE_RW	0x100	/* hardware: WR, anded with dirty in exception */
#define _PAGE_EXEC	0x200	/* hardware: EX permission */
#define _PAGE_ACCESSED	0x400	/* software: R: page referenced */

/* No page size encoding in the linux PTE */
#define _PAGE_PSIZE		0

/* cache related flags non existing on 40x */
#define _PAGE_COHERENT	0

/* Kernel permission combinations built from the hardware/software bits */
#define _PAGE_KERNEL_RO		0
#define _PAGE_KERNEL_ROX	_PAGE_EXEC
#define _PAGE_KERNEL_RW		(_PAGE_DIRTY | _PAGE_RW)
#define _PAGE_KERNEL_RWX	(_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)

#define _PMD_PRESENT	0x400	/* PMD points to page of PTEs */
#define _PMD_PRESENT_MASK	_PMD_PRESENT
#define _PMD_BAD	0x802
#define _PMD_SIZE_4M	0x0c0
#define _PMD_SIZE_16M	0x0e0
#define _PMD_USER	0

#define _PTE_NONE_MASK	0

#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE	(_PAGE_BASE_NC)

/* Permission masks used to generate the __P and __S table */
#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_NOHASH_32_PTE_40x_H */