/*
 * Copyright (C) 2001-2006 Silicon Graphics, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * A simple uncached page allocator using the generic allocator. This
 * allocator first utilizes the spare (spill) pages found in the EFI
 * memmap and will then start converting cached pages to uncached ones,
 * one granule at a time. Node awareness is implemented by having a
 * pool of pages per node.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/efi.h>
#include <linux/genalloc.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/sn/arch.h>


extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);

#define MAX_UNCACHED_GRANULES	5
static int allocated_granules;

struct gen_pool *uncached_pool[MAX_NUMNODES];


static void uncached_ipi_visibility(void *data)
{
	int status;

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if ((status != PAL_VISIBILITY_OK) &&
	    (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
		printk(KERN_DEBUG "pal_prefetch_visibility() returns %i on "
		       "CPU %i\n", status, raw_smp_processor_id());
}


static void uncached_ipi_mc_drain(void *data)
{
	int status;

	status = ia64_pal_mc_drain();
	if (status)
		printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
		       "CPU %i\n", status, raw_smp_processor_id());
}


/*
 * Add a new chunk of uncached memory pages to the specified pool.
 *
 * @pool: pool to add new chunk of uncached memory to
 * @nid: node id of node to allocate memory from, or -1
 *
 * This is accomplished by first allocating a granule of cached memory pages
 * and then converting them to uncached memory pages.
 */
static int uncached_add_chunk(struct gen_pool *pool, int nid)
{
	struct page *page;
	int status, i;
	unsigned long c_addr, uc_addr;

	if (allocated_granules >= MAX_UNCACHED_GRANULES)
		return -1;

	/* attempt to allocate a granule's worth of cached memory pages */

	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO,
				IA64_GRANULE_SHIFT-PAGE_SHIFT);
	if (!page)
		return -1;

	/* convert the memory pages from cached to uncached */

	c_addr = (unsigned long)page_address(page);
	uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;

	/*
	 * There's a small race here where it's possible for someone to
	 * access the page through /dev/mem halfway through the conversion
	 * to uncached - not sure it's really worth bothering about
	 */
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		SetPageUncached(&page[i]);

	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if (!status) {
		status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
		if (status)
			goto failed;
	}

	preempt_disable();

	if (ia64_platform_is("sn2"))
		sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
	else
		flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	/* flush the just introduced uncached translation from the TLB */
	local_flush_tlb_all();

	preempt_enable();

	ia64_pal_mc_drain();
	status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
	if (status)
		goto failed;

	/*
	 * The chunk of memory pages has been converted to uncached so now we
	 * can add it to the pool.
	 */
	status = gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid);
	if (status)
		goto failed;

	allocated_granules++;
	return 0;

	/* failed to convert or add the chunk so give it back to the kernel */
failed:
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		ClearPageUncached(&page[i]);

	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
	return -1;
}


/*
 * uncached_alloc_page
 *
 * @starting_nid: node id of node to start with, or -1
 *
 * Allocate one uncached page on the requested node.  If no uncached pages
 * are available on the requested node, fall back to the other nodes in
 * round-robin order, starting with the next higher node.
 */
unsigned long uncached_alloc_page(int starting_nid)
{
	unsigned long uc_addr;
	struct gen_pool *pool;
	int nid;

	if (unlikely(starting_nid >= MAX_NUMNODES))
		return 0;

	if (starting_nid < 0)
		starting_nid = numa_node_id();
	nid = starting_nid;

	do {
		if (!node_online(nid))
			continue;
		pool = uncached_pool[nid];
		if (pool == NULL)
			continue;
		do {
			uc_addr = gen_pool_alloc(pool, PAGE_SIZE);
			if (uc_addr != 0)
				return uc_addr;
		} while (uncached_add_chunk(pool, nid) == 0);

	} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);

	return 0;
}
EXPORT_SYMBOL(uncached_alloc_page);


/*
 * uncached_free_page
 *
 * @uc_addr: uncached address of page to free
 *
 * Free a single uncached page.
 */
void uncached_free_page(unsigned long uc_addr)
{
	int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pool[nid];

	if (unlikely(pool == NULL))
		return;

	if ((uc_addr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET)
		panic("uncached_free_page invalid address %lx\n", uc_addr);

	gen_pool_free(pool, uc_addr, PAGE_SIZE);
}
EXPORT_SYMBOL(uncached_free_page);


/*
 * uncached_build_memmap
 *
 * @uc_start: uncached starting address of a chunk of uncached memory
 * @uc_end: uncached ending address of a chunk of uncached memory
 * @arg: ignored (NULL argument passed in on call to efi_memmap_walk_uc())
 *
 * Called at boot time to build a map of pages that can be used for
 * memory special operations.
 */
static int __init uncached_build_memmap(unsigned long uc_start,
					unsigned long uc_end, void *arg)
{
	int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pool[nid];
	size_t size = uc_end - uc_start;

	touch_softlockup_watchdog();

	if (pool != NULL) {
		memset((char *)uc_start, 0, size);
		(void) gen_pool_add(pool, uc_start, size, nid);
	}
	return 0;
}


static int __init uncached_init(void)
{
	int nid;

	for_each_online_node(nid) {
		uncached_pool[nid] = gen_pool_create(PAGE_SHIFT, nid);
	}

	efi_memmap_walk_uc(uncached_build_memmap, NULL);
	return 0;
}

__initcall(uncached_init);
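

/*
 * Illustrative usage sketch (not part of the original file): how an
 * in-kernel caller, e.g. a driver exporting uncached memory to user
 * space, might obtain and release a single uncached page through the
 * interface above.  The function name uncached_example() is
 * hypothetical; the block is kept under "#if 0" so it serves as
 * documentation only and is never built.
 */
#if 0
static int uncached_example(void)
{
	unsigned long uc_addr;

	/* request one uncached page, starting on the local node (-1) */
	uc_addr = uncached_alloc_page(-1);
	if (uc_addr == 0)
		return -ENOMEM;		/* no uncached memory available */

	/* uc_addr is an uncached kernel virtual address, PAGE_SIZE bytes */
	memset((void *)uc_addr, 0, PAGE_SIZE);

	/* return the page to its per-node pool when done */
	uncached_free_page(uc_addr);

	return 0;
}
#endif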