// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

/*
 * Simple buddy allocator for the nVHE hypervisor (EL2). Pages are tracked
 * via a dedicated hyp vmemmap of struct hyp_page, and free pages double as
 * list nodes so no extra metadata storage is needed.
 */

#include <asm/kvm_hyp.h>
#include <nvhe/gfp.h>

/* Base of the hypervisor's private vmemmap of struct hyp_page. */
u64 __hyp_vmemmap;

/*
 * Index the hyp_vmemmap to find a potential buddy page, but make no assumption
 * about its current state.
 *
 * Example buddy-tree for a 4-pages physically contiguous pool:
 *
 *                 o : Page 3
 *                /
 *               o-o : Page 2
 *              /
 *             /   o : Page 1
 *            /   /
 *           o---o-o : Page 0
 *    Order  2   1 0
 *
 * Example of requests on this pool:
 *   __find_buddy_nocheck(pool, page 0, order 0) => page 1
 *   __find_buddy_nocheck(pool, page 0, order 1) => page 2
 *   __find_buddy_nocheck(pool, page 1, order 0) => page 0
 *   __find_buddy_nocheck(pool, page 2, order 0) => page 3
 */
static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool,
					     struct hyp_page *p,
					     unsigned short order)
{
	phys_addr_t addr = hyp_page_to_phys(p);

	/* The buddy of a 2^order block differs only in bit 'order' of its PFN. */
	addr ^= (PAGE_SIZE << order);

	/*
	 * Don't return a page outside the pool range -- it belongs to
	 * something else and may not be mapped in hyp_vmemmap.
	 */
	if (addr < pool->range_start || addr >= pool->range_end)
		return NULL;

	return hyp_phys_to_page(addr);
}

/*
 * Find a buddy page currently available for allocation: it must be free
 * (refcount == 0) and be the head of a free block of exactly @order
 * (buddy->order == order), otherwise it cannot be coalesced with @p.
 */
static struct hyp_page *__find_buddy_avail(struct hyp_pool *pool,
					   struct hyp_page *p,
					   unsigned short order)
{
	struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order);

	if (!buddy || buddy->order != order || buddy->refcount)
		return NULL;

	return buddy;

}

/*
 * Pages that are available for allocation are tracked in free-lists, so we use
 * the pages themselves to store the list nodes to avoid wasting space. As the
 * allocator always returns zeroed pages (which are zeroed on the hyp_put_page()
 * path to optimize allocation speed), we also need to clean-up the list node in
 * each page when we take it out of the list.
 */
static inline void page_remove_from_list(struct hyp_page *p)
{
	struct list_head *node = hyp_page_to_virt(p);

	__list_del_entry(node);
	/* Re-zero the node so the page contents stay all-zeroes (see above). */
	memset(node, 0, sizeof(*node));
}

static inline void page_add_to_list(struct hyp_page *p, struct list_head *head)
{
	struct list_head *node = hyp_page_to_virt(p);

	INIT_LIST_HEAD(node);
	list_add_tail(node, head);
}

/* Map a free-list node (stored in the page itself) back to its hyp_page. */
static inline struct hyp_page *node_to_page(struct list_head *node)
{
	return hyp_virt_to_page(node);
}

/*
 * Return a free page to the pool, coalescing it with its buddies into the
 * largest possible block before inserting it in the matching free list.
 * Must be called with the pool lock held; @p->refcount must be 0.
 */
static void __hyp_attach_page(struct hyp_pool *pool,
			      struct hyp_page *p)
{
	unsigned short order = p->order;
	struct hyp_page *buddy;

	/* Zero on the free path so hyp_alloc_pages() can return pages as-is. */
	memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order);

	/*
	 * Only the first struct hyp_page of a high-order page (otherwise known
	 * as the 'head') should have p->order set. The non-head pages should
	 * have p->order = HYP_NO_ORDER. Here @p may no longer be the head
	 * after coalescing, so make sure to mark it HYP_NO_ORDER proactively.
	 */
	p->order = HYP_NO_ORDER;
	for (; (order + 1) < pool->max_order; order++) {
		buddy = __find_buddy_avail(pool, p, order);
		if (!buddy)
			break;

		/* Take the buddy out of its list, and coalesce with @p */
		page_remove_from_list(buddy);
		buddy->order = HYP_NO_ORDER;
		/* The lower-addressed of the two becomes the merged head. */
		p = min(p, buddy);
	}

	/* Mark the new head, and insert it */
	p->order = order;
	page_add_to_list(p, &pool->free_area[order]);
}

/*
 * Remove free block @p from its free list and split it down until it is
 * exactly @order, re-inserting the unused halves in the lower free lists.
 * Must be called with the pool lock held; assumes p->order >= @order.
 */
static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
					   struct hyp_page *p,
					   unsigned short order)
{
	struct hyp_page *buddy;

	page_remove_from_list(p);
	while (p->order > order) {
		/*
		 * The buddy of order n - 1 currently has HYP_NO_ORDER as it
		 * is covered by a higher-level page (whose head is @p). Use
		 * __find_buddy_nocheck() to find it and inject it in the
		 * free_area[n - 1], effectively splitting @p in half.
		 */
		p->order--;
		buddy = __find_buddy_nocheck(pool, p, p->order);
		buddy->order = p->order;
		page_add_to_list(buddy, &pool->free_area[buddy->order]);
	}

	return p;
}

/* Take a reference on @p; refcount is an unsigned short, so trap saturation. */
static inline void hyp_page_ref_inc(struct hyp_page *p)
{
	BUG_ON(p->refcount == USHRT_MAX);
	p->refcount++;
}

/* Drop a reference on @p; returns non-zero when the count reached zero. */
static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
{
	BUG_ON(!p->refcount);
	p->refcount--;
	return (p->refcount == 0);
}

/* Transition @p from free (refcount 0) to allocated with a single reference. */
static inline void hyp_set_page_refcounted(struct hyp_page *p)
{
	BUG_ON(p->refcount);
	p->refcount = 1;
}

/* Lock-free inner helper; caller must hold pool->lock. */
static void __hyp_put_page(struct hyp_pool *pool, struct hyp_page *p)
{
	if (hyp_page_ref_dec_and_test(p))
		__hyp_attach_page(pool, p);
}

/*
 * Changes to the buddy tree and page refcounts must be done with the hyp_pool
 * lock held. If a refcount change requires an update to the buddy tree (e.g.
 * hyp_put_page()), both operations must be done within the same critical
 * section to guarantee transient states (e.g. a page with null refcount but
 * not yet attached to a free list) can't be observed by well-behaved readers.
 */
void hyp_put_page(struct hyp_pool *pool, void *addr)
{
	struct hyp_page *p = hyp_virt_to_page(addr);

	hyp_spin_lock(&pool->lock);
	__hyp_put_page(pool, p);
	hyp_spin_unlock(&pool->lock);
}

/* Take an extra reference on the page backing hyp VA @addr. */
void hyp_get_page(struct hyp_pool *pool, void *addr)
{
	struct hyp_page *p = hyp_virt_to_page(addr);

	hyp_spin_lock(&pool->lock);
	hyp_page_ref_inc(p);
	hyp_spin_unlock(&pool->lock);
}

/*
 * Split an allocated high-order page into order-0 pages, giving each tail
 * page its own reference. Note: no pool lock is taken here -- presumably the
 * caller owns the page exclusively and/or holds the lock; verify at call
 * sites (NOTE(review): TODO confirm locking expectations).
 */
void hyp_split_page(struct hyp_page *p)
{
	unsigned short order = p->order;
	unsigned int i;

	p->order = 0;
	for (i = 1; i < (1 << order); i++) {
		struct hyp_page *tail = p + i;

		tail->order = 0;
		hyp_set_page_refcounted(tail);
	}
}

/*
 * Allocate a zeroed block of 2^order pages from @pool, returning its hyp VA
 * or NULL if no block of at least that order is free. The returned page is
 * handed out with refcount == 1 (pages are zeroed on the free path, see
 * __hyp_attach_page()).
 */
void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
{
	unsigned short i = order;
	struct hyp_page *p;

	hyp_spin_lock(&pool->lock);

	/* Look for a high-enough-order page */
	while (i < pool->max_order && list_empty(&pool->free_area[i]))
		i++;
	if (i >= pool->max_order) {
		hyp_spin_unlock(&pool->lock);
		return NULL;
	}

	/* Extract it from the tree at the right order */
	p = node_to_page(pool->free_area[i].next);
	p = __hyp_extract_page(pool, p, order);

	hyp_set_page_refcounted(p);
	hyp_spin_unlock(&pool->lock);

	return hyp_page_to_virt(p);
}

/*
 * Initialize @pool to manage @nr_pages physically contiguous pages starting
 * at @pfn. The first @reserved_pages are left allocated (refcount 1) for the
 * caller; the remainder are released into the buddy tree.
 *
 * Returns 0 (the function currently cannot fail; the int return leaves room
 * for future error reporting).
 */
int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
		  unsigned int reserved_pages)
{
	phys_addr_t phys = hyp_pfn_to_phys(pfn);
	struct hyp_page *p;
	int i;

	hyp_spin_lock_init(&pool->lock);
	pool->max_order = min(MAX_ORDER, get_order((nr_pages + 1) << PAGE_SHIFT));
	for (i = 0; i < pool->max_order; i++)
		INIT_LIST_HEAD(&pool->free_area[i]);
	pool->range_start = phys;
	pool->range_end = phys + (nr_pages << PAGE_SHIFT);

	/* Init the vmemmap portion */
	p = hyp_phys_to_page(phys);
	for (i = 0; i < nr_pages; i++) {
		p[i].order = 0;
		hyp_set_page_refcounted(&p[i]);
	}

	/* Attach the unused pages to the buddy tree */
	for (i = reserved_pages; i < nr_pages; i++)
		__hyp_put_page(pool, &p[i]);

	return 0;
}