xref: /openbmc/linux/arch/arm64/kvm/hyp/include/nvhe/gfp.h (revision 8e17c662)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 #ifndef __KVM_HYP_GFP_H
3 #define __KVM_HYP_GFP_H
4 
5 #include <linux/list.h>
6 
7 #include <nvhe/memory.h>
8 #include <nvhe/spinlock.h>
9 
10 #define HYP_NO_ORDER	UINT_MAX
11 
/*
 * Per-pool allocator state for the EL2 (nVHE hypervisor) page allocator.
 * One free list per allocation order; all mutation goes through @lock.
 */
struct hyp_pool {
	/*
	 * Spinlock protecting concurrent changes to the memory pool as well as
	 * the struct hyp_page of the pool's pages until we have a proper atomic
	 * API at EL2.
	 */
	hyp_spinlock_t lock;
	/* Free lists indexed by allocation order, 0..MAX_ORDER-1. */
	struct list_head free_area[MAX_ORDER];
	/*
	 * Physical address span the pool manages; NOTE(review): presumably a
	 * half-open [range_start, range_end) interval — confirm in page_alloc.c.
	 */
	phys_addr_t range_start;
	phys_addr_t range_end;
	/* Largest order this pool hands out; must not exceed MAX_ORDER. */
	unsigned int max_order;
};
24 
25 static inline void hyp_page_ref_inc(struct hyp_page *p)
26 {
27 	struct hyp_pool *pool = hyp_page_to_pool(p);
28 
29 	hyp_spin_lock(&pool->lock);
30 	p->refcount++;
31 	hyp_spin_unlock(&pool->lock);
32 }
33 
34 static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
35 {
36 	struct hyp_pool *pool = hyp_page_to_pool(p);
37 	int ret;
38 
39 	hyp_spin_lock(&pool->lock);
40 	p->refcount--;
41 	ret = (p->refcount == 0);
42 	hyp_spin_unlock(&pool->lock);
43 
44 	return ret;
45 }
46 
47 static inline void hyp_set_page_refcounted(struct hyp_page *p)
48 {
49 	struct hyp_pool *pool = hyp_page_to_pool(p);
50 
51 	hyp_spin_lock(&pool->lock);
52 	if (p->refcount) {
53 		hyp_spin_unlock(&pool->lock);
54 		hyp_panic();
55 	}
56 	p->refcount = 1;
57 	hyp_spin_unlock(&pool->lock);
58 }
59 
/* Allocation */
/*
 * Allocate 2^@order contiguous pages from @pool; NOTE(review): presumably
 * returns a hyp VA, or NULL on failure — confirm against page_alloc.c.
 */
void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order);
/* Take / drop a reference on the page backing hyp VA @addr. */
void hyp_get_page(void *addr);
void hyp_put_page(void *addr);

/* Used pages cannot be freed */
/*
 * Initialise @pool over @nr_pages pages starting at @pfn; the first
 * @reserved_pages are marked in use and never enter the free lists.
 */
int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
		  unsigned int reserved_pages);
68 #endif /* __KVM_HYP_GFP_H */
69