/*
 * arch/xtensa/mm/init.c
 *
 * Derived from MIPS, PPC.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Chris Zankel	<chris@zankel.net>
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 * Marc Gauthier
 * Kevin Chea
 */

#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/bootmem.h>
#include <linux/swap.h>

#include <asm/pgtable.h>
#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>

#define DEBUG 0

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
static DEFINE_SPINLOCK(tlb_lock);

/*
 * This flag is cleared to indicate that a page was mapped and modified in
 * kernel space, so the cache is probably dirty at that address.
 * If cache aliasing is enabled and the page color mismatches, update_mmu_cache
 * synchronizes the caches while the bit is still clear (and then sets it).
 */

#define PG_cache_clean PG_arch_1

/* References to section boundaries */

extern char _ftext, _etext, _fdata, _edata, _rodata_end;
extern char __init_begin, __init_end;

/*
 * mem_reserve(start, end, must_exist)
 *
 * Reserve some memory from the memory pool.
 *
 * Parameters:
 *  start	Start of region,
 *  end		End of region,
 *  must_exist	Must exist in memory pool.
 *
 * Returns:
 *  0 (region is not in the memory pool; nothing was reserved)
 * -1 (success)
 */

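/*
 * Illustrative call (the real callers live in the platform setup code;
 * this is only an example, reserving the kernel text):
 *
 *	mem_reserve(__pa(&_ftext), __pa(&_etext), 0);
 */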
int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
{
	int i;

	if (start == end)
		return 0;

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

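	/* Find the first bank that the region [start, end) intersects. */
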
	for (i = 0; i < sysmem.nr_banks; i++)
		if (start < sysmem.bank[i].end
		    && end >= sysmem.bank[i].start)
			break;

	if (i == sysmem.nr_banks) {
		if (must_exist)
			printk (KERN_WARNING "mem_reserve: [0x%0lx, 0x%0lx) "
				"not in any region!\n", start, end);
		return 0;
	}

	if (start > sysmem.bank[i].start) {
		if (end < sysmem.bank[i].end) {
			/* split entry */
			if (sysmem.nr_banks >= SYSMEM_BANKS_MAX)
				panic("meminfo overflow\n");
			sysmem.bank[sysmem.nr_banks].start = end;
			sysmem.bank[sysmem.nr_banks].end = sysmem.bank[i].end;
			sysmem.nr_banks++;
		}
		sysmem.bank[i].end = start;
	} else {
		if (end < sysmem.bank[i].end)
			sysmem.bank[i].start = end;
		else {
			/* remove entry */
			sysmem.nr_banks--;
			sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
			sysmem.bank[i].end   = sysmem.bank[sysmem.nr_banks].end;
		}
	}
	return -1;
}


/*
 * Initialize the bootmem system and give it all the memory we have available.
 */

void __init bootmem_init(void)
{
	unsigned long pfn;
	unsigned long bootmap_start, bootmap_size;
	int i;

	max_low_pfn = max_pfn = 0;
	min_low_pfn = ~0;

	for (i = 0; i < sysmem.nr_banks; i++) {
		pfn = PAGE_ALIGN(sysmem.bank[i].start) >> PAGE_SHIFT;
		if (pfn < min_low_pfn)
			min_low_pfn = pfn;
		pfn = PAGE_ALIGN(sysmem.bank[i].end - 1) >> PAGE_SHIFT;
		if (pfn > max_pfn)
			max_pfn = pfn;
	}

	if (min_low_pfn > max_pfn)
		panic("No memory found!\n");

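	/*
	 * Note: despite its name, MAX_MEM_PFN appears to be a size in
	 * bytes (the statically mappable KSEG window), hence the shift
	 * down to a PFN before comparing.
	 */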
	max_low_pfn = max_pfn < MAX_MEM_PFN >> PAGE_SHIFT ?
		max_pfn : MAX_MEM_PFN >> PAGE_SHIFT;

	/* Find an area to use for the bootmem bitmap. */

	bootmap_size = bootmem_bootmap_pages(max_low_pfn) << PAGE_SHIFT;
	bootmap_start = ~0;

	for (i = 0; i < sysmem.nr_banks; i++)
		if (sysmem.bank[i].end - sysmem.bank[i].start >= bootmap_size) {
			bootmap_start = sysmem.bank[i].start;
			break;
		}

	if (bootmap_start == ~0UL)
		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);

	/* Reserve the bootmem bitmap area */

	mem_reserve(bootmap_start, bootmap_start + bootmap_size, 1);
	bootmap_size = init_bootmem_node(NODE_DATA(0), min_low_pfn,
					 bootmap_start >> PAGE_SHIFT,
					 max_low_pfn);

	/* Add all remaining memory pieces into the bootmem map */

	for (i = 0; i < sysmem.nr_banks; i++)
		free_bootmem(sysmem.bank[i].start,
			     sysmem.bank[i].end - sysmem.bank[i].start);
}


void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	int i;

	/* All pages are DMA-able, so we put them all in the DMA zone. */

	zones_size[ZONE_DMA] = max_low_pfn;
	for (i = 1; i < MAX_NR_ZONES; i++)
		zones_size[i] = 0;

#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
#endif

	/* Initialize the kernel's page tables. */

	memset(swapper_pg_dir, 0, PAGE_SIZE);

	free_area_init(zones_size);
}

/*
 * Flush the MMU and reset associated registers to default values.
 */

void __init init_mmu (void)
{
	/* Writing zeros to the <t>TLBCFG special registers ensures
	 * that valid values exist in the register.  For existing
	 * PGSZID<w> fields, zero selects the first element of the
	 * page-size array.  For nonexistent PGSZID<w> fields, zero is
	 * the best value to write.  Also, when changing PGSZID<w>
	 * fields, the corresponding TLB must be flushed.
	 */
	set_itlbcfg_register (0);
	set_dtlbcfg_register (0);
	flush_tlb_all ();

	/* Set rasid register to a known value. */

	set_rasid_register (ASID_USER_FIRST);

	/* Set PTEVADDR special register to the start of the page
	 * table, which is in kernel mappable space (i.e. not
	 * statically mapped).  This register's value is undefined on
	 * reset.
	 */
	set_ptevaddr_register (PGTABLE_START);
}

/*
 * Initialize memory pages.
 */

void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;
	unsigned long highmemsize, tmp, ram;

	max_mapnr = num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_mapnr << PAGE_SHIFT);
	highmemsize = 0;

#ifdef CONFIG_HIGHMEM
#error HIGHMEM not implemented in init.c
#endif

	totalram_pages += free_all_bootmem();

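	/* Count RAM and reserved pages for the boot-time summary below. */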
	reservedpages = ram = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++) {
		ram++;
		if (PageReserved(mem_map+tmp))
			reservedpages++;
	}

	codesize =  (unsigned long) &_etext - (unsigned long) &_ftext;
	datasize =  (unsigned long) &_edata - (unsigned long) &_fdata;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, "
	       "%ldk data, %ldk init, %ldk highmem)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
	       ram << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10,
	       highmemsize >> 10);
}

void
free_reserved_mem(void *start, void *end)
{
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page((unsigned long)start);
		totalram_pages++;
	}
}

#ifdef CONFIG_BLK_DEV_INITRD
extern int initrd_is_mapped;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (initrd_is_mapped) {
		free_reserved_mem((void*)start, (void*)end);
		printk ("Freeing initrd memory: %ldk freed\n", (end-start)>>10);
	}
}
#endif

void free_initmem(void)
{
	free_reserved_mem(&__init_begin, &__init_end);
	printk("Freeing unused kernel memory: %dk freed\n",
	       (&__init_end - &__init_begin) >> 10);
}

void show_mem(void)
{
	int i, free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (!page_count(mem_map + i))
			free++;
		else
			shared += page_count(mem_map + i) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
	printk("%d free pages\n", free);
}

/* ------------------------------------------------------------------------- */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

/*
 * With cache aliasing, the page color of the page in kernel space and user
 * space might mismatch. We temporarily map the page to a different virtual
 * address with the same color and clear the page there.
 */

void clear_user_page(void *kaddr, unsigned long vaddr, struct page* page)
{
	/* There shouldn't be any entries for this page. */

	__flush_invalidate_dcache_page_phys(__pa(page_address(page)));

	if (!PAGE_COLOR_EQ(vaddr, kaddr)) {
		unsigned long v, p;

		/* Temporarily map page to DTLB_WAY_DCACHE_ALIAS0. */

		spin_lock(&tlb_lock);

		p = (unsigned long)pte_val((mk_pte(page, PAGE_KERNEL)));
		kaddr = (void*)PAGE_COLOR_MAP0(vaddr);
		v = (unsigned long)kaddr | DTLB_WAY_DCACHE_ALIAS0;
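		/*
		 * wdtlb writes the PTE value (p) into the DTLB entry
		 * selected by v (aliased vaddr plus way); dsync waits
		 * for the TLB update to take effect.
		 */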
		__asm__ __volatile__("wdtlb %0,%1; dsync" : :"a" (p), "a" (v));

		clear_page(kaddr);

		spin_unlock(&tlb_lock);
	} else {
		clear_page(kaddr);
	}

	/* We need to make sure that i$ and d$ are coherent. */

	clear_bit(PG_cache_clean, &page->flags);
}

/*
 * With cache aliasing, we have to make sure that the page color of the page
 * in kernel space matches that of the virtual user address before we read
 * the page. If the page colors differ, we create a temporary DTLB entry with
 * the correct page color and use this 'temporary' address as the source.
 * We then use the same approach as in clear_user_page to copy the data
 * to kernel space and clear the PG_cache_clean bit to synchronize the
 * caches later.
 *
 * Note:
 * Instead of using another 'way' for the temporary DTLB entry, we could
 * probably use the same entry that points to the kernel address (after
 * saving the original value and restoring it when we are done).
 */

void copy_user_page(void* to, void* from, unsigned long vaddr,
		    struct page* to_page)
{
	/* There shouldn't be any entries for the new page. */

	__flush_invalidate_dcache_page_phys(__pa(page_address(to_page)));

	spin_lock(&tlb_lock);

	if (!PAGE_COLOR_EQ(vaddr, from)) {
		unsigned long v, p, t;

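		/*
		 * pdtlb probes the DTLB for the current mapping of 'from'
		 * (entry specifier into t); rdtlb1 then reads that entry's
		 * translation into p so it can be reinstalled below at an
		 * alias with the user's page color.
		 */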
		__asm__ __volatile__ ("pdtlb %1,%2; rdtlb1 %0,%1"
				      : "=a"(p), "=a"(t) : "a"(from));
		from = (void*)PAGE_COLOR_MAP0(vaddr);
		v = (unsigned long)from | DTLB_WAY_DCACHE_ALIAS0;
		__asm__ __volatile__ ("wdtlb %0,%1; dsync" ::"a" (p), "a" (v));
	}

	if (!PAGE_COLOR_EQ(vaddr, to)) {
		unsigned long v, p;

		p = (unsigned long)pte_val((mk_pte(to_page, PAGE_KERNEL)));
		to = (void*)PAGE_COLOR_MAP1(vaddr);
		v = (unsigned long)to | DTLB_WAY_DCACHE_ALIAS1;
		__asm__ __volatile__ ("wdtlb %0,%1; dsync" ::"a" (p), "a" (v));
	}
	copy_page(to, from);

	spin_unlock(&tlb_lock);

	/* We need to make sure that i$ and d$ are coherent. */

	clear_bit(PG_cache_clean, &to_page->flags);
}


/*
 * Any time the kernel writes to a user page cache page, or is about to
 * read from a page cache page, this routine is called.
 *
 * Note:
 * The kernel currently only provides one architecture bit in the page
 * flags that we use for I$/D$ coherency. Maybe, in the future, we can
 * use a separate bit for deferred dcache aliasing: if the page is not
 * mapped yet, we only need to set a flag; if it is mapped, we need to
 * invalidate the page.
 */
// FIXME: we probably need this for WB caches, not only for page coloring.

void flush_dcache_page(struct page *page)
{
	unsigned long addr = __pa(page_address(page));
	struct address_space *mapping = page_mapping(page);
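	/* 'mapping' is only consulted by the disabled code path below. */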

	__flush_invalidate_dcache_page_phys(addr);

	if (!test_bit(PG_cache_clean, &page->flags))
		return;

	/* If this page hasn't been mapped yet, handle I$/D$ coherency later. */
#if 0
	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_cache_clean, &page->flags);
	else
#endif
		__invalidate_icache_page_phys(addr);
}

void flush_cache_range(struct vm_area_struct* vma, unsigned long s,
		       unsigned long e)
{
	__flush_invalidate_cache_all();
}

void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
		      unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);

	/* Remove any entry for the old mapping. */

	if (current->active_mm == vma->vm_mm) {
		unsigned long addr = __pa(page_address(page));
		__flush_invalidate_dcache_page_phys(addr);
		if ((vma->vm_flags & VM_EXEC) != 0)
			__invalidate_icache_page_phys(addr);
	} else {
		BUG();
	}
}

#endif	/* (DCACHE_WAY_SIZE > PAGE_SIZE) */

pte_t* pte_alloc_one_kernel (struct mm_struct* mm, unsigned long addr)
{
	pte_t* pte = (pte_t*)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 0);
	if (likely(pte)) {
		pte_t* ptep = pte;
		int i;
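
		/* Clear all 1024 (PTRS_PER_PTE) entries in the new table. */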
		for (i = 0; i < 1024; i++, ptep++)
			pte_clear(mm, addr, ptep);
	}
	return pte;
}

struct page* pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | __GFP_REPEAT, 0);

	if (likely(page)) {
		pte_t* ptep = kmap_atomic(page, KM_USER0);
		int i;

		for (i = 0; i < 1024; i++)
			pte_clear(mm, addr, ptep + i);

		kunmap_atomic(ptep, KM_USER0);
	}
	return page;
}


/*
 * Handle D$/I$ coherency.
 *
 * Note:
 * We only have one architecture bit for the page flags, so we cannot
 * handle cache aliasing yet.
 */

void
update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;
	unsigned long vaddr = addr & PAGE_MASK;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);

	invalidate_itlb_mapping(addr);
	invalidate_dtlb_mapping(addr);

	/* We have a new mapping. Use it. */

	write_dtlb_entry(pte, dtlb_probe(addr));

	/* If the processor can execute from this page, synchronize D$/I$. */

	if ((vma->vm_flags & VM_EXEC) != 0) {

		write_itlb_entry(pte, itlb_probe(addr));

		/* Synchronize caches, if not clean. */

		if (!test_and_set_bit(PG_cache_clean, &page->flags)) {
			__flush_dcache_page(vaddr);
			__invalidate_icache_page(vaddr);
		}
	}
}