/*
 * SPU local store allocation routines
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/mmu.h>

/* Allocate the LSCSA with an ordinary vmalloc (4K kernel pages). */
static int spu_alloc_lscsa_std(struct spu_state *csa)
{
	struct spu_lscsa *lscsa;
	unsigned char *p;

	lscsa = vmalloc(sizeof(struct spu_lscsa));
	if (!lscsa)
		return -ENOMEM;
	memset(lscsa, 0, sizeof(struct spu_lscsa));
	csa->lscsa = lscsa;

	/* Set LS pages reserved to allow for user-space mapping. */
	for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		SetPageReserved(vmalloc_to_page(p));

	return 0;
}

static void spu_free_lscsa_std(struct spu_state *csa)
{
	/* Clear reserved bit before vfree. */
	unsigned char *p;

	if (csa->lscsa == NULL)
		return;

	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		ClearPageReserved(vmalloc_to_page(p));

	vfree(csa->lscsa);
}

#ifdef CONFIG_SPU_FS_64K_LS

#define SPU_64K_PAGE_SHIFT	16
#define SPU_64K_PAGE_ORDER	(SPU_64K_PAGE_SHIFT - PAGE_SHIFT)
#define SPU_64K_PAGE_COUNT	(1ul << SPU_64K_PAGE_ORDER)

int spu_alloc_lscsa(struct spu_state *csa)
{
	struct page	**pgarray;
	unsigned char	*p;
	int		i, j, n_4k;

	/* Check availability of 64K pages */
	if (mmu_psize_defs[MMU_PAGE_64K].shift == 0)
		goto fail;

	csa->use_big_pages = 1;

	pr_debug("spu_alloc_lscsa(csa=0x%p), trying to allocate 64K pages\n",
		 csa);

	/* First try to allocate our 64K pages. We need 5 of them
	 * with the current implementation. In the future, we should try
	 * to separate the lscsa from the actual local store image, thus
	 * allowing us to require only 4 64K pages per context.
	 */
	for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++) {
		/* XXX This is likely to fail, we should use a special pool
		 * similar to what hugetlbfs does.
		 */
		csa->lscsa_pages[i] = alloc_pages(GFP_KERNEL,
						  SPU_64K_PAGE_ORDER);
		if (csa->lscsa_pages[i] == NULL)
			goto fail;
	}

	pr_debug(" success ! creating vmap...\n");

	/* Now we need to create a vmalloc mapping of these for the kernel
	 * and SPU context switch code to use. Currently, we stick to a
	 * normal kernel vmalloc mapping, which in our case will be 4K.
	 */
	n_4k = SPU_64K_PAGE_COUNT * SPU_LSCSA_NUM_BIG_PAGES;
	pgarray = kmalloc(sizeof(struct page *) * n_4k, GFP_KERNEL);
	if (pgarray == NULL)
		goto fail;
	for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
		for (j = 0; j < SPU_64K_PAGE_COUNT; j++)
			/* We assume all the struct pages are contiguous,
			 * which should hopefully be the case for an order-4
			 * allocation.
			 */
			pgarray[i * SPU_64K_PAGE_COUNT + j] =
				csa->lscsa_pages[i] + j;
	csa->lscsa = vmap(pgarray, n_4k, VM_USERMAP, PAGE_KERNEL);
	kfree(pgarray);
	if (csa->lscsa == NULL)
		goto fail;

	memset(csa->lscsa, 0, sizeof(struct spu_lscsa));

	/* Set LS pages reserved to allow for user-space mapping.
	 *
	 * XXX isn't that a bit obsolete? I think we should just
	 * make sure the page count is high enough. Anyway, it won't
	 * harm for now.
	 */
	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		SetPageReserved(vmalloc_to_page(p));

	pr_debug(" all good !\n");

	return 0;
fail:
	pr_debug("spufs: failed to allocate lscsa 64K pages, falling back\n");
	spu_free_lscsa(csa);
	return spu_alloc_lscsa_std(csa);
}

void spu_free_lscsa(struct spu_state *csa)
{
	unsigned char *p;
	int i;

	if (!csa->use_big_pages) {
		spu_free_lscsa_std(csa);
		return;
	}
	csa->use_big_pages = 0;

	if (csa->lscsa == NULL)
		goto free_pages;

	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		ClearPageReserved(vmalloc_to_page(p));

	vunmap(csa->lscsa);
	csa->lscsa = NULL;

 free_pages:

	for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
		if (csa->lscsa_pages[i])
			__free_pages(csa->lscsa_pages[i], SPU_64K_PAGE_ORDER);
}

#else /* CONFIG_SPU_FS_64K_LS */

int spu_alloc_lscsa(struct spu_state *csa)
{
	return spu_alloc_lscsa_std(csa);
}

void spu_free_lscsa(struct spu_state *csa)
{
	spu_free_lscsa_std(csa);
}

#endif /* !defined(CONFIG_SPU_FS_64K_LS) */