// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <asm/kvm_hyp.h>
#include <nvhe/gfp.h>

u64 __hyp_vmemmap;

/*
 * Index the hyp_vmemmap to find a potential buddy page, but make no assumption
 * about its current state.
 *
 * Example buddy-tree for a 4-pages physically contiguous pool:
 *
 *                 o : Page 3
 *                /
 *               o-o : Page 2
 *              /
 *             /   o : Page 1
 *            /   /
 *           o---o-o : Page 0
 *    Order  2   1 0
 *
 * Example of requests on this pool:
 *   __find_buddy_nocheck(pool, page 0, order 0) => page 1
 *   __find_buddy_nocheck(pool, page 0, order 1) => page 2
 *   __find_buddy_nocheck(pool, page 1, order 0) => page 0
 *   __find_buddy_nocheck(pool, page 2, order 0) => page 3
 */
static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool,
                                             struct hyp_page *p,
                                             unsigned short order)
{
        phys_addr_t addr = hyp_page_to_phys(p);

        addr ^= (PAGE_SIZE << order);

        /*
         * Don't return a page outside the pool range -- it belongs to
         * something else and may not be mapped in hyp_vmemmap.
         */
        if (addr < pool->range_start || addr >= pool->range_end)
                return NULL;

        return hyp_phys_to_page(addr);
}
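
/*
 * Illustrative note (added for clarity, not part of the upstream file):
 * with 4KiB pages and, purely for the sake of the arithmetic, a pool
 * starting at physical address 0x0, the XOR above works out the example
 * requests as:
 *
 *   page 0, order 0: 0x0000 ^ (0x1000 << 0) = 0x1000 => page 1
 *   page 0, order 1: 0x0000 ^ (0x1000 << 1) = 0x2000 => page 2
 *   page 1, order 0: 0x1000 ^ (0x1000 << 0) = 0x0000 => page 0
 *   page 2, order 0: 0x2000 ^ (0x1000 << 0) = 0x3000 => page 3
 *
 * Real pools start wherever the hypervisor carved out its memory; only the
 * 0x0 base is assumed here.
 */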

/* Find a buddy page currently available for allocation */
static struct hyp_page *__find_buddy_avail(struct hyp_pool *pool,
                                           struct hyp_page *p,
                                           unsigned short order)
{
        struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order);

        if (!buddy || buddy->order != order || buddy->refcount)
                return NULL;

        return buddy;
}

/*
 * Pages that are available for allocation are tracked in free-lists, so we use
 * the pages themselves to store the list nodes to avoid wasting space. As the
 * allocator always returns zeroed pages (which are zeroed on the hyp_put_page()
 * path to optimize allocation speed), we also need to clean up the list node in
 * each page when we take it out of the list.
 */
static inline void page_remove_from_list(struct hyp_page *p)
{
        struct list_head *node = hyp_page_to_virt(p);

        __list_del_entry(node);
        memset(node, 0, sizeof(*node));
}

static inline void page_add_to_list(struct hyp_page *p, struct list_head *head)
{
        struct list_head *node = hyp_page_to_virt(p);

        INIT_LIST_HEAD(node);
        list_add_tail(node, head);
}

static inline struct hyp_page *node_to_page(struct list_head *node)
{
        return hyp_virt_to_page(node);
}

static void __hyp_attach_page(struct hyp_pool *pool,
                              struct hyp_page *p)
{
        unsigned short order = p->order;
        struct hyp_page *buddy;

        memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order);

        /*
         * Only the first struct hyp_page of a high-order page (otherwise known
         * as the 'head') should have p->order set. The non-head pages should
         * have p->order = HYP_NO_ORDER. Here @p may no longer be the head
         * after coalescing, so make sure to mark it HYP_NO_ORDER proactively.
         */
        p->order = HYP_NO_ORDER;
        for (; (order + 1) < pool->max_order; order++) {
                buddy = __find_buddy_avail(pool, p, order);
                if (!buddy)
                        break;

                /* Take the buddy out of its list, and coalesce with @p */
                page_remove_from_list(buddy);
                buddy->order = HYP_NO_ORDER;
                p = min(p, buddy);
        }

        /* Mark the new head, and insert it */
        p->order = order;
        page_add_to_list(p, &pool->free_area[order]);
}
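
/*
 * Illustrative walk-through (added for clarity, not part of the upstream
 * file), assuming a pool large enough that order-2 blocks are tracked: if
 * page 0 is free at order 0 and pages 2-3 are free as an order-1 block
 * headed at page 2, attaching page 1 first coalesces it with page 0 into an
 * order-1 block headed at page 0 (min() keeps the lower of the two), then
 * coalesces that block with page 2's block into an order-2 block, which is
 * finally inserted into free_area[2].
 */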

static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
                                           struct hyp_page *p,
                                           unsigned short order)
{
        struct hyp_page *buddy;

        page_remove_from_list(p);
        while (p->order > order) {
                /*
                 * The buddy of order n - 1 currently has HYP_NO_ORDER as it
                 * is covered by a higher-level page (whose head is @p). Use
                 * __find_buddy_nocheck() to find it and inject it in the
                 * free_list[n - 1], effectively splitting @p in half.
                 */
                p->order--;
                buddy = __find_buddy_nocheck(pool, p, p->order);
                buddy->order = p->order;
                page_add_to_list(buddy, &pool->free_area[buddy->order]);
        }

        return p;
}

static inline void hyp_page_ref_inc(struct hyp_page *p)
{
        BUG_ON(p->refcount == USHRT_MAX);
        p->refcount++;
}

static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
{
        p->refcount--;
        return (p->refcount == 0);
}

static inline void hyp_set_page_refcounted(struct hyp_page *p)
{
        BUG_ON(p->refcount);
        p->refcount = 1;
}

static void __hyp_put_page(struct hyp_pool *pool, struct hyp_page *p)
{
        if (hyp_page_ref_dec_and_test(p))
                __hyp_attach_page(pool, p);
}

/*
 * Changes to the buddy tree and page refcounts must be done with the hyp_pool
 * lock held. If a refcount change requires an update to the buddy tree (e.g.
 * hyp_put_page()), both operations must be done within the same critical
 * section to guarantee transient states (e.g. a page with null refcount but
 * not yet attached to a free list) can't be observed by well-behaved readers.
 */
void hyp_put_page(struct hyp_pool *pool, void *addr)
{
        struct hyp_page *p = hyp_virt_to_page(addr);

        hyp_spin_lock(&pool->lock);
        __hyp_put_page(pool, p);
        hyp_spin_unlock(&pool->lock);
}

void hyp_get_page(struct hyp_pool *pool, void *addr)
{
        struct hyp_page *p = hyp_virt_to_page(addr);

        hyp_spin_lock(&pool->lock);
        hyp_page_ref_inc(p);
        hyp_spin_unlock(&pool->lock);
}

void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
{
        unsigned short i = order;
        struct hyp_page *p;

        hyp_spin_lock(&pool->lock);

        /* Look for a high-enough-order page */
        while (i < pool->max_order && list_empty(&pool->free_area[i]))
                i++;
        if (i >= pool->max_order) {
                hyp_spin_unlock(&pool->lock);
                return NULL;
        }

        /* Extract it from the tree at the right order */
        p = node_to_page(pool->free_area[i].next);
        p = __hyp_extract_page(pool, p, order);

        hyp_set_page_refcounted(p);
        hyp_spin_unlock(&pool->lock);

        return hyp_page_to_virt(p);
}

int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
                  unsigned int reserved_pages)
{
        phys_addr_t phys = hyp_pfn_to_phys(pfn);
        struct hyp_page *p;
        int i;

        hyp_spin_lock_init(&pool->lock);
        pool->max_order = min(MAX_ORDER, get_order(nr_pages << PAGE_SHIFT));
        for (i = 0; i < pool->max_order; i++)
                INIT_LIST_HEAD(&pool->free_area[i]);
        pool->range_start = phys;
        pool->range_end = phys + (nr_pages << PAGE_SHIFT);

        /* Init the vmemmap portion */
        p = hyp_phys_to_page(phys);
        for (i = 0; i < nr_pages; i++) {
                p[i].order = 0;
                hyp_set_page_refcounted(&p[i]);
        }

        /* Attach the unused pages to the buddy tree */
        for (i = reserved_pages; i < nr_pages; i++)
                __hyp_put_page(pool, &p[i]);

        return 0;
}
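
/*
 * Illustrative sketch (added for clarity, not part of the upstream file and
 * compiled out): how a hyp component might drive this pool, assuming it runs
 * in the nVHE hyp context with the API above. example_use_pool() and its
 * parameters are hypothetical.
 */
#if 0
static int example_use_pool(struct hyp_pool *pool, u64 pfn,
                            unsigned int nr_pages)
{
        void *page;
        int ret;

        /* Hand the [pfn, pfn + nr_pages) range to the allocator, reserving nothing. */
        ret = hyp_pool_init(pool, pfn, nr_pages, 0);
        if (ret)
                return ret;

        /* Take a single zeroed page; NULL means no free block of a high enough order. */
        page = hyp_alloc_pages(pool, 0);
        if (!page)
                return -ENOMEM;

        hyp_get_page(pool, page);       /* Take an extra reference... */
        hyp_put_page(pool, page);       /* ...and drop it again. */

        /* Drop the allocation reference: the page is zeroed and re-attached to the tree. */
        hyp_put_page(pool, page);

        return 0;
}
#endif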