// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <asm/kvm_hyp.h>
#include <nvhe/gfp.h>

u64 __hyp_vmemmap;

/*
 * Index the hyp_vmemmap to find a potential buddy page, but make no assumption
 * about its current state.
 *
 * Example buddy-tree for a 4-page physically contiguous pool:
 *
 *                 o : Page 3
 *                /
 *               o-o : Page 2
 *              /
 *             /   o : Page 1
 *            /   /
 *           o---o-o : Page 0
 *    Order  2   1 0
 *
 * Example of requests on this pool:
 *   __find_buddy_nocheck(pool, page 0, order 0) => page 1
 *   __find_buddy_nocheck(pool, page 0, order 1) => page 2
 *   __find_buddy_nocheck(pool, page 1, order 0) => page 0
 *   __find_buddy_nocheck(pool, page 2, order 0) => page 3
 */
static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool,
					     struct hyp_page *p,
					     unsigned int order)
{
	phys_addr_t addr = hyp_page_to_phys(p);

	addr ^= (PAGE_SIZE << order);

	/*
	 * Don't return a page outside the pool range -- it belongs to
	 * something else and may not be mapped in hyp_vmemmap.
	 */
	if (addr < pool->range_start || addr >= pool->range_end)
		return NULL;

	return hyp_phys_to_page(addr);
}

/* Find a buddy page currently available for allocation */
static struct hyp_page *__find_buddy_avail(struct hyp_pool *pool,
					   struct hyp_page *p,
					   unsigned int order)
{
	struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order);

	if (!buddy || buddy->order != order || list_empty(&buddy->node))
		return NULL;

	return buddy;
}
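
/*
 * Worked example of the XOR trick in __find_buddy_nocheck() above. This is
 * an illustrative sketch only: it assumes 4KiB pages (PAGE_SIZE == 0x1000)
 * and a pool whose range starts at physical address 0x0; neither value is
 * taken from this file.
 *
 *	page 1, order 0:	0x1000 ^ (0x1000 << 0) == 0x0000 -> page 0
 *	page 0, order 1:	0x0000 ^ (0x1000 << 1) == 0x2000 -> page 2
 *	page 2, order 0:	0x2000 ^ (0x1000 << 0) == 0x3000 -> page 3
 *
 * Flipping the single bit (PAGE_SIZE << order) toggles between the two
 * halves of the enclosing order-(order + 1) block, so the operation is its
 * own inverse: a page and its buddy always map to each other.
 */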

static void __hyp_attach_page(struct hyp_pool *pool,
			      struct hyp_page *p)
{
	unsigned int order = p->order;
	struct hyp_page *buddy;

	memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order);

	/*
	 * Only the first struct hyp_page of a high-order page (otherwise known
	 * as the 'head') should have p->order set. The non-head pages should
	 * have p->order = HYP_NO_ORDER. Here @p may no longer be the head
	 * after coalescing, so make sure to mark it HYP_NO_ORDER proactively.
	 */
	p->order = HYP_NO_ORDER;
	for (; (order + 1) < pool->max_order; order++) {
		buddy = __find_buddy_avail(pool, p, order);
		if (!buddy)
			break;

		/* Take the buddy out of its list, and coalesce with @p */
		list_del_init(&buddy->node);
		buddy->order = HYP_NO_ORDER;
		p = min(p, buddy);
	}

	/* Mark the new head, and insert it */
	p->order = order;
	list_add_tail(&p->node, &pool->free_area[order]);
}

static void hyp_attach_page(struct hyp_page *p)
{
	struct hyp_pool *pool = hyp_page_to_pool(p);

	hyp_spin_lock(&pool->lock);
	__hyp_attach_page(pool, p);
	hyp_spin_unlock(&pool->lock);
}

static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
					   struct hyp_page *p,
					   unsigned int order)
{
	struct hyp_page *buddy;

	list_del_init(&p->node);
	while (p->order > order) {
		/*
		 * The buddy of order n - 1 currently has HYP_NO_ORDER as it
		 * is covered by a higher-level page (whose head is @p). Use
		 * __find_buddy_nocheck() to find it and inject it in the
		 * free_area[n - 1], effectively splitting @p in half.
		 */
		p->order--;
		buddy = __find_buddy_nocheck(pool, p, p->order);
		buddy->order = p->order;
		list_add_tail(&buddy->node, &pool->free_area[buddy->order]);
	}

	return p;
}

void hyp_put_page(void *addr)
{
	struct hyp_page *p = hyp_virt_to_page(addr);

	if (hyp_page_ref_dec_and_test(p))
		hyp_attach_page(p);
}

void hyp_get_page(void *addr)
{
	struct hyp_page *p = hyp_virt_to_page(addr);

	hyp_page_ref_inc(p);
}

void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order)
{
	unsigned int i = order;
	struct hyp_page *p;

	hyp_spin_lock(&pool->lock);

	/* Look for a high-enough-order page */
	while (i < pool->max_order && list_empty(&pool->free_area[i]))
		i++;
	if (i >= pool->max_order) {
		hyp_spin_unlock(&pool->lock);
		return NULL;
	}

	/* Extract it from the tree at the right order */
	p = list_first_entry(&pool->free_area[i], struct hyp_page, node);
	p = __hyp_extract_page(pool, p, order);

	hyp_spin_unlock(&pool->lock);
	hyp_set_page_refcounted(p);

	return hyp_page_to_virt(p);
}

int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
		  unsigned int reserved_pages)
{
	phys_addr_t phys = hyp_pfn_to_phys(pfn);
	struct hyp_page *p;
	int i;

	hyp_spin_lock_init(&pool->lock);
	pool->max_order = min(MAX_ORDER, get_order(nr_pages << PAGE_SHIFT));
	for (i = 0; i < pool->max_order; i++)
		INIT_LIST_HEAD(&pool->free_area[i]);
	pool->range_start = phys;
	pool->range_end = phys + (nr_pages << PAGE_SHIFT);

	/* Init the vmemmap portion */
	p = hyp_phys_to_page(phys);
	memset(p, 0, sizeof(*p) * nr_pages);
	for (i = 0; i < nr_pages; i++) {
		p[i].pool = pool;
		INIT_LIST_HEAD(&p[i].node);
	}

	/* Attach the unused pages to the buddy tree */
	for (i = reserved_pages; i < nr_pages; i++)
		__hyp_attach_page(pool, &p[i]);

	return 0;
}
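
/*
 * Usage sketch for the API above. Illustrative only: example_pool,
 * example_setup(), and the choice of order are assumptions made up for
 * this example, not values taken from the hypervisor.
 *
 *	static struct hyp_pool example_pool;
 *
 *	int example_setup(u64 pfn, unsigned int nr_pages)
 *	{
 *		void *p;
 *		int ret;
 *
 *		ret = hyp_pool_init(&example_pool, pfn, nr_pages, 0);
 *		if (ret)
 *			return ret;
 *
 *		p = hyp_alloc_pages(&example_pool, 1);
 *		if (!p)
 *			return -ENOMEM;
 *
 *		hyp_get_page(p);
 *		hyp_put_page(p);
 *		hyp_put_page(p);
 *		return 0;
 *	}
 *
 * hyp_alloc_pages() hands back the order-1 block refcounted; hyp_get_page()
 * takes an extra reference, and the two hyp_put_page() calls drop the count
 * back to zero, at which point the block is zeroed and re-attached to the
 * buddy tree by hyp_attach_page().
 */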