/*
 * arch/xtensa/mm/init.c
 *
 * Derived from MIPS, PPC.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Chris Zankel	<chris@zankel.net>
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 * Marc Gauthier
 * Kevin Chea
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/bootmem.h>
#include <linux/swap.h>

#include <asm/pgtable.h>
#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>


#define DEBUG 0

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* Protects the temporary DTLB mappings set up by the cache-aliasing
 * helpers below (clear_user_page, copy_user_page). */
static DEFINE_SPINLOCK(tlb_lock);

/*
 * This flag is used to indicate that the page was mapped and modified in
 * kernel space, so the cache is probably dirty at that address.
 * If cache aliasing is enabled and the page color mismatches, update_mmu_cache
 * synchronizes the caches if this bit is clear.
 */

#define PG_cache_clean PG_arch_1
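
/*
 * Illustrative summary of how the bit is used below (no new semantics):
 * clear_user_page() and copy_user_page() clear the bit after writing the
 * page through a kernel mapping; update_mmu_cache() sets it again with
 * test_and_set_bit() and, if it was clear, writes back the dcache and
 * invalidates the icache for executable pages.
 */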

/* References to section boundaries */

extern char _ftext, _etext, _fdata, _edata, _rodata_end;
extern char __init_begin, __init_end;

/*
 * mem_reserve(start, end, must_exist)
 *
 * Reserve some memory from the memory pool.
 *
 * Parameters:
 *  start	Start of region,
 *  end		End of region,
 *  must_exist	Must exist in memory pool.
 *
 * Returns:
 *  0 (region is empty or does not overlap any memory bank)
 * -1 (region was reserved successfully)
 */

int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
{
	int i;

	if (start == end)
		return 0;

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (i = 0; i < sysmem.nr_banks; i++)
		if (start < sysmem.bank[i].end
		    && end >= sysmem.bank[i].start)
			break;

	if (i == sysmem.nr_banks) {
		if (must_exist)
			printk(KERN_WARNING "mem_reserve: [0x%0lx, 0x%0lx) "
			       "not in any region!\n", start, end);
		return 0;
	}

	if (start > sysmem.bank[i].start) {
		if (end < sysmem.bank[i].end) {
			/* split entry */
			if (sysmem.nr_banks >= SYSMEM_BANKS_MAX)
				panic("meminfo overflow\n");
			sysmem.bank[sysmem.nr_banks].start = end;
			sysmem.bank[sysmem.nr_banks].end = sysmem.bank[i].end;
			sysmem.nr_banks++;
		}
		sysmem.bank[i].end = start;
	} else {
		if (end < sysmem.bank[i].end)
			sysmem.bank[i].start = end;
		else {
			/* remove entry */
			sysmem.nr_banks--;
			sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
			sysmem.bank[i].end   = sysmem.bank[sysmem.nr_banks].end;
		}
	}
	return -1;
}
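
/*
 * Example (illustrative values only): with a single bank covering
 * [0x00000000, 0x08000000), the call
 *
 *	mem_reserve(0x00100000, 0x00200000, 1);
 *
 * takes the split-entry path above and leaves two banks,
 * [0x00000000, 0x00100000) and [0x00200000, 0x08000000).
 */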

/*
 * Initialize the bootmem system and give it all the memory we have available.
 */

void __init bootmem_init(void)
{
	unsigned long pfn;
	unsigned long bootmap_start, bootmap_size;
	int i;

	max_low_pfn = max_pfn = 0;
	min_low_pfn = ~0;

	for (i = 0; i < sysmem.nr_banks; i++) {
		pfn = PAGE_ALIGN(sysmem.bank[i].start) >> PAGE_SHIFT;
		if (pfn < min_low_pfn)
			min_low_pfn = pfn;
		pfn = PAGE_ALIGN(sysmem.bank[i].end - 1) >> PAGE_SHIFT;
		if (pfn > max_pfn)
			max_pfn = pfn;
	}

	if (min_low_pfn > max_pfn)
		panic("No memory found!\n");

	max_low_pfn = max_pfn < MAX_LOW_MEMORY >> PAGE_SHIFT ?
		max_pfn : MAX_LOW_MEMORY >> PAGE_SHIFT;

	/* Find an area to use for the bootmem bitmap. */

	bootmap_size = bootmem_bootmap_pages(max_low_pfn) << PAGE_SHIFT;
	bootmap_start = ~0;

	for (i = 0; i < sysmem.nr_banks; i++)
		if (sysmem.bank[i].end - sysmem.bank[i].start >= bootmap_size) {
			bootmap_start = sysmem.bank[i].start;
			break;
		}

	if (bootmap_start == ~0UL)
		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);

	/* Reserve the bootmem bitmap area. */

	mem_reserve(bootmap_start, bootmap_start + bootmap_size, 1);
	bootmap_size = init_bootmem_node(NODE_DATA(0), min_low_pfn,
					 bootmap_start >> PAGE_SHIFT,
					 max_low_pfn);

	/* Add all remaining memory pieces into the bootmem map. */

	for (i = 0; i < sysmem.nr_banks; i++)
		free_bootmem(sysmem.bank[i].start,
			     sysmem.bank[i].end - sysmem.bank[i].start);
}
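
/*
 * Example of the pfn rounding above (assuming 4 KiB pages): a bank
 * [0x00001800, 0x00009000) contributes min_low_pfn = 2, the first fully
 * contained page (0x1800 rounds up to 0x2000), and max_pfn = 9, one past
 * the last page (PAGE_ALIGN(0x9000 - 1) >> 12).
 */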

void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	int i;

	/* All pages are DMA-able, so we put them all in the DMA zone. */

	zones_size[ZONE_DMA] = max_low_pfn;
	for (i = 1; i < MAX_NR_ZONES; i++)
		zones_size[i] = 0;

#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
#endif

	/* Initialize the kernel's page tables. */

	memset(swapper_pg_dir, 0, PAGE_SIZE);

	free_area_init(zones_size);
}

/*
 * Flush the MMU and reset the associated registers to default values.
 */

void __init init_mmu (void)
{
	/* Writing zeros to the <t>TLBCFG special registers ensures
	 * that valid values exist in the register.  For existing
	 * PGSZID<w> fields, zero selects the first element of the
	 * page-size array.  For nonexistent PGSZID<w> fields, zero is
	 * the best value to write.  Also, when changing PGSZID<w>
	 * fields, the corresponding TLB must be flushed.
	 */
	set_itlbcfg_register (0);
	set_dtlbcfg_register (0);
	flush_tlb_all ();

	/* Set rasid register to a known value. */

	set_rasid_register (ASID_ALL_RESERVED);

	/* Set PTEVADDR special register to the start of the page
	 * table, which is in kernel mappable space (i.e. not
	 * statically mapped).  This register's value is undefined on
	 * reset.
	 */
	set_ptevaddr_register (PGTABLE_START);
}

/*
 * Initialize memory pages.
 */

void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;
	unsigned long highmemsize, tmp, ram;

	max_mapnr = num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_mapnr << PAGE_SHIFT);
	highmemsize = 0;

#ifdef CONFIG_HIGHMEM
#error HIGHMEM not implemented in init.c
#endif

	totalram_pages += free_all_bootmem();

	reservedpages = ram = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++) {
		ram++;
		if (PageReserved(mem_map + tmp))
			reservedpages++;
	}

	codesize =  (unsigned long) &_etext - (unsigned long) &_ftext;
	datasize =  (unsigned long) &_edata - (unsigned long) &_fdata;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, "
	       "%ldk data, %ldk init, %ldk highmem)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
	       ram << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10,
	       highmemsize >> 10);
}

void
free_reserved_mem(void *start, void *end)
{
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page((unsigned long)start);
		totalram_pages++;
	}
}

#ifdef CONFIG_BLK_DEV_INITRD
extern int initrd_is_mapped;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (initrd_is_mapped) {
		free_reserved_mem((void *)start, (void *)end);
		printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	}
}
#endif

void free_initmem(void)
{
	free_reserved_mem(&__init_begin, &__init_end);
	printk("Freeing unused kernel memory: %dk freed\n",
	       (&__init_end - &__init_begin) >> 10);
}

void show_mem(void)
{
	int i, free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n", nr_swap_pages << (PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map + i))
			reserved++;
		else if (PageSwapCache(mem_map + i))
			cached++;
		else if (!page_count(mem_map + i))
			free++;
		else
			shared += page_count(mem_map + i) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
	printk("%d free pages\n", free);
}

/* ------------------------------------------------------------------------- */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

/*
 * With cache aliasing, the page color of the page in kernel space and user
 * space might mismatch. We temporarily map the page to a different virtual
 * address with the same color and clear the page there.
 */

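/*
 * A minimal sketch of what "page color" means here (the real
 * PAGE_COLOR_* macros live in the headers; this is an illustration
 * only): the color is the part of the virtual address that indexes a
 * dcache way beyond the page offset, roughly
 *
 *	color_mask   = (DCACHE_WAY_SIZE - 1) & PAGE_MASK;
 *	colors_equal = ((va1 ^ va2) & color_mask) == 0;
 *
 * Two mappings of the same physical page with different colors index
 * different cache sets, hence the temporary remapping below.
 */
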
void clear_user_page(void *kaddr, unsigned long vaddr, struct page *page)
{
	/* There shouldn't be any entries for this page. */

	__flush_invalidate_dcache_page_phys(__pa(page_address(page)));

	if (!PAGE_COLOR_EQ(vaddr, kaddr)) {
		unsigned long v, p;

		/* Temporarily map page to DTLB_WAY_DCACHE_ALIAS0. */

		spin_lock(&tlb_lock);

		p = (unsigned long)pte_val((mk_pte(page, PAGE_KERNEL)));
		kaddr = (void *)PAGE_COLOR_MAP0(vaddr);
		v = (unsigned long)kaddr | DTLB_WAY_DCACHE_ALIAS0;
		__asm__ __volatile__("wdtlb %0,%1; dsync" : : "a" (p), "a" (v));

		clear_page(kaddr);

		spin_unlock(&tlb_lock);
	} else {
		clear_page(kaddr);
	}

	/* We need to make sure that i$ and d$ are coherent. */

	clear_bit(PG_cache_clean, &page->flags);
}

/*
 * With cache aliasing, we have to make sure that the page color of the page
 * in kernel space matches that of the virtual user address before we read
 * the page. If the page colors differ, we create a temporary DTLB entry with
 * the correct page color and use this 'temporary' address as the source.
 * We then use the same approach as in clear_user_page and copy the data
 * to the kernel space and clear the PG_cache_clean bit to synchronize caches
 * later.
 *
 * Note:
 * Instead of using another 'way' for the temporary DTLB entry, we could
 * probably use the same entry that points to the kernel address (after
 * saving the original value and restoring it when we are done).
 */

void copy_user_page(void *to, void *from, unsigned long vaddr,
		    struct page *to_page)
{
	/* There shouldn't be any entries for the new page. */

	__flush_invalidate_dcache_page_phys(__pa(page_address(to_page)));

	spin_lock(&tlb_lock);

	if (!PAGE_COLOR_EQ(vaddr, from)) {
		unsigned long v, p, t;

		__asm__ __volatile__ ("pdtlb %1,%2; rdtlb1 %0,%1"
				      : "=a"(p), "=a"(t) : "a"(from));
		from = (void *)PAGE_COLOR_MAP0(vaddr);
		v = (unsigned long)from | DTLB_WAY_DCACHE_ALIAS0;
		__asm__ __volatile__ ("wdtlb %0,%1; dsync" : : "a" (p), "a" (v));
	}

	if (!PAGE_COLOR_EQ(vaddr, to)) {
		unsigned long v, p;

		p = (unsigned long)pte_val((mk_pte(to_page, PAGE_KERNEL)));
		to = (void *)PAGE_COLOR_MAP1(vaddr);
		v = (unsigned long)to | DTLB_WAY_DCACHE_ALIAS1;
		__asm__ __volatile__ ("wdtlb %0,%1; dsync" : : "a" (p), "a" (v));
	}
	copy_page(to, from);

	spin_unlock(&tlb_lock);

	/* We need to make sure that i$ and d$ are coherent. */

	clear_bit(PG_cache_clean, &to_page->flags);
}

/*
 * This routine is called any time the kernel writes to, or is about to
 * read from, a user page-cache page.
 *
 * Note:
 * The kernel currently only provides one architecture bit in the page
 * flags that we use for I$/D$ coherency. Maybe, in the future, we can
 * use a separate bit for deferred dcache aliasing:
 * If the page is not mapped yet, we only need to set a flag;
 * if mapped, we need to invalidate the page.
 */
// FIXME: we probably need this for WB caches, not only for page coloring.

void flush_dcache_page(struct page *page)
{
	unsigned long addr = __pa(page_address(page));
	struct address_space *mapping = page_mapping(page);

	__flush_invalidate_dcache_page_phys(addr);

	if (!test_bit(PG_cache_clean, &page->flags))
		return;

	/* If this page hasn't been mapped yet, handle I$/D$ coherency later. */
#if 0
	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_cache_clean, &page->flags);
	else
#endif
		__invalidate_icache_page_phys(addr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long s,
		       unsigned long e)
{
	__flush_invalidate_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);

	/* Remove any entry for the old mapping. */

	if (current->active_mm == vma->vm_mm) {
		unsigned long addr = __pa(page_address(page));
		__flush_invalidate_dcache_page_phys(addr);
		if ((vma->vm_flags & VM_EXEC) != 0)
			__invalidate_icache_page_phys(addr);
	} else {
		BUG();
	}
}

#endif	/* (DCACHE_WAY_SIZE > PAGE_SIZE) */


pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
	pte_t *pte = (pte_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 0);

	if (likely(pte)) {
		/* __get_free_pages() already returns a kernel virtual
		 * address, so clear the 1024 (PTRS_PER_PTE) entries in the
		 * new table directly. */
		pte_t *ptep = pte;
		int i;

		for (i = 0; i < 1024; i++, ptep++)
			pte_clear(mm, addr, ptep);
	}
	return pte;
}

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | __GFP_REPEAT, 0);

	if (likely(page)) {
		pte_t *ptep = kmap_atomic(page, KM_USER0);
		int i;

		for (i = 0; i < 1024; i++)
			pte_clear(mm, addr, ptep + i);

		/* Unmap with the original mapping address, not a pointer
		 * advanced past the end of the page. */
		kunmap_atomic(ptep, KM_USER0);
	}
	return page;
}


/*
 * Handle D$/I$ coherency.
 *
 * Note:
 * We only have one architecture bit for the page flags, so we cannot handle
 * cache aliasing, yet.
 */

void
update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;
	unsigned long vaddr = addr & PAGE_MASK;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);

	invalidate_itlb_mapping(addr);
	invalidate_dtlb_mapping(addr);

	/* We have a new mapping. Use it. */

	write_dtlb_entry(pte, dtlb_probe(addr));

	/* If the processor can execute from this page, synchronize D$/I$. */

	if ((vma->vm_flags & VM_EXEC) != 0) {

		write_itlb_entry(pte, itlb_probe(addr));

		/* Synchronize caches, if not clean. */

		if (!test_and_set_bit(PG_cache_clean, &page->flags)) {
			__flush_dcache_page(vaddr);
			__invalidate_icache_page(vaddr);
		}
	}
}