/* xref: /openbmc/linux/arch/arm64/kvm/hyp/include/nvhe/gfp.h (revision 8e17c662) */
1*8e17c662SQuentin Perret /* SPDX-License-Identifier: GPL-2.0-only */
2*8e17c662SQuentin Perret #ifndef __KVM_HYP_GFP_H
3*8e17c662SQuentin Perret #define __KVM_HYP_GFP_H
4*8e17c662SQuentin Perret 
5*8e17c662SQuentin Perret #include <linux/list.h>
6*8e17c662SQuentin Perret 
7*8e17c662SQuentin Perret #include <nvhe/memory.h>
8*8e17c662SQuentin Perret #include <nvhe/spinlock.h>
9*8e17c662SQuentin Perret 
10*8e17c662SQuentin Perret #define HYP_NO_ORDER	UINT_MAX
11*8e17c662SQuentin Perret 
/*
 * Per-pool allocator state for the EL2 (nVHE) hypervisor page allocator.
 * The per-order free lists suggest a buddy-style allocator — see the
 * hyp_alloc_pages() implementation to confirm.
 */
struct hyp_pool {
	/*
	 * Spinlock protecting concurrent changes to the memory pool as well as
	 * the struct hyp_page of the pool's pages until we have a proper atomic
	 * API at EL2.
	 */
	hyp_spinlock_t lock;
	/* One free list per allocation order, indexed 0..MAX_ORDER-1. */
	struct list_head free_area[MAX_ORDER];
	/* Physical address bounds of the memory backing this pool. */
	phys_addr_t range_start;
	phys_addr_t range_end;
	/* Largest order this pool serves; presumably <= MAX_ORDER — TODO confirm at init. */
	unsigned int max_order;
};
24*8e17c662SQuentin Perret 
25*8e17c662SQuentin Perret static inline void hyp_page_ref_inc(struct hyp_page *p)
26*8e17c662SQuentin Perret {
27*8e17c662SQuentin Perret 	struct hyp_pool *pool = hyp_page_to_pool(p);
28*8e17c662SQuentin Perret 
29*8e17c662SQuentin Perret 	hyp_spin_lock(&pool->lock);
30*8e17c662SQuentin Perret 	p->refcount++;
31*8e17c662SQuentin Perret 	hyp_spin_unlock(&pool->lock);
32*8e17c662SQuentin Perret }
33*8e17c662SQuentin Perret 
34*8e17c662SQuentin Perret static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
35*8e17c662SQuentin Perret {
36*8e17c662SQuentin Perret 	struct hyp_pool *pool = hyp_page_to_pool(p);
37*8e17c662SQuentin Perret 	int ret;
38*8e17c662SQuentin Perret 
39*8e17c662SQuentin Perret 	hyp_spin_lock(&pool->lock);
40*8e17c662SQuentin Perret 	p->refcount--;
41*8e17c662SQuentin Perret 	ret = (p->refcount == 0);
42*8e17c662SQuentin Perret 	hyp_spin_unlock(&pool->lock);
43*8e17c662SQuentin Perret 
44*8e17c662SQuentin Perret 	return ret;
45*8e17c662SQuentin Perret }
46*8e17c662SQuentin Perret 
47*8e17c662SQuentin Perret static inline void hyp_set_page_refcounted(struct hyp_page *p)
48*8e17c662SQuentin Perret {
49*8e17c662SQuentin Perret 	struct hyp_pool *pool = hyp_page_to_pool(p);
50*8e17c662SQuentin Perret 
51*8e17c662SQuentin Perret 	hyp_spin_lock(&pool->lock);
52*8e17c662SQuentin Perret 	if (p->refcount) {
53*8e17c662SQuentin Perret 		hyp_spin_unlock(&pool->lock);
54*8e17c662SQuentin Perret 		hyp_panic();
55*8e17c662SQuentin Perret 	}
56*8e17c662SQuentin Perret 	p->refcount = 1;
57*8e17c662SQuentin Perret 	hyp_spin_unlock(&pool->lock);
58*8e17c662SQuentin Perret }
59*8e17c662SQuentin Perret 
60*8e17c662SQuentin Perret /* Allocation */
61*8e17c662SQuentin Perret void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order);
62*8e17c662SQuentin Perret void hyp_get_page(void *addr);
63*8e17c662SQuentin Perret void hyp_put_page(void *addr);
64*8e17c662SQuentin Perret 
65*8e17c662SQuentin Perret /* Used pages cannot be freed */
66*8e17c662SQuentin Perret int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
67*8e17c662SQuentin Perret 		  unsigned int reserved_pages);
68*8e17c662SQuentin Perret #endif /* __KVM_HYP_GFP_H */
69