xref: /openbmc/linux/arch/x86/kvm/mmu/tdp_iter.h (revision c2fe645e)
// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_ITER_H
#define __KVM_X86_MMU_TDP_ITER_H

#include <linux/kvm_host.h>

#include "mmu.h"

/*
 * TDP MMU SPTEs are RCU protected to allow paging structures (non-leaf SPTEs)
 * to be zapped while holding mmu_lock for read, and to allow TLB flushes to be
 * batched without having to collect the list of zapped SPs.  Flows that can
 * remove SPs must service pending TLB flushes prior to dropping RCU protection.
 */
static inline u64 kvm_tdp_mmu_read_spte(tdp_ptep_t sptep)
{
	return READ_ONCE(*rcu_dereference(sptep));
}

static inline void kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 val)
{
	WRITE_ONCE(*rcu_dereference(sptep), val);
}
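
/*
 * Illustrative sketch (not taken from any specific caller): readers of a TDP
 * SPTE are expected to be inside an RCU read-side critical section, e.g.
 *
 *	rcu_read_lock();
 *	spte = kvm_tdp_mmu_read_spte(sptep);
 *	...
 *	rcu_read_unlock();
 *
 * Writers additionally hold mmu_lock; kvm_tdp_mmu_write_spte() is the
 * non-atomic path, typically used when mmu_lock is held for write.
 */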

/*
 * A TDP iterator performs a pre-order walk over a TDP paging structure.
 */
struct tdp_iter {
	/*
	 * The iterator will traverse the paging structure towards the mapping
	 * for this GFN.
	 */
	gfn_t next_last_level_gfn;
	/*
	 * The next_last_level_gfn at the time when the thread last
	 * yielded. Yielding only when next_last_level_gfn != yielded_gfn
	 * helps ensure forward progress.
	 */
	gfn_t yielded_gfn;
	/* Pointers to the page tables traversed to reach the current SPTE */
	tdp_ptep_t pt_path[PT64_ROOT_MAX_LEVEL];
	/* A pointer to the current SPTE */
	tdp_ptep_t sptep;
	/* The lowest GFN mapped by the current SPTE */
	gfn_t gfn;
	/* The level of the root page given to the iterator */
	int root_level;
	/* The lowest level the iterator should traverse to */
	int min_level;
	/* The iterator's current level within the paging structure */
	int level;
	/* The address space ID, i.e. SMM vs. regular. */
	int as_id;
	/* A snapshot of the value at sptep */
	u64 old_spte;
	/*
	 * Whether the iterator has a valid state. This will be false if the
	 * iterator walks off the end of the paging structure.
	 */
	bool valid;
	/*
	 * True if KVM dropped mmu_lock and yielded in the middle of a walk, in
	 * which case tdp_iter_next() needs to restart the walk at the root
	 * level instead of advancing to the next entry.
	 */
	bool yielded;
};
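
/*
 * Worked example (sketch): for a walk rooted at a 4-level page table
 * (root_level == 4) headed towards a 4KiB mapping, pt_path[3] points at the
 * root table and pt_path[0] at the last-level table; at any point during the
 * walk, sptep points into pt_path[level - 1] at the index selected by gfn
 * for that level.
 */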

/*
 * Iterates over every SPTE mapping the GFN range [start, end) in a
 * preorder traversal.
 */
#define for_each_tdp_pte_min_level(iter, root, min_level, start, end) \
	for (tdp_iter_start(&iter, root, min_level, start); \
	     iter.valid && iter.gfn < end;		     \
	     tdp_iter_next(&iter))

#define for_each_tdp_pte(iter, root, start, end) \
	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end)
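
/*
 * Typical usage (illustrative sketch mirroring callers in tdp_mmu.c, not a
 * complete example): the caller holds mmu_lock and enters an RCU read-side
 * critical section around the walk, e.g.
 *
 *	rcu_read_lock();
 *	for_each_tdp_pte(iter, root, start, end) {
 *		if (!is_shadow_present_pte(iter.old_spte))
 *			continue;
 *		...
 *	}
 *	rcu_read_unlock();
 */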

tdp_ptep_t spte_to_child_pt(u64 pte, int level);

void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
		    int min_level, gfn_t next_last_level_gfn);
void tdp_iter_next(struct tdp_iter *iter);
void tdp_iter_restart(struct tdp_iter *iter);
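
/*
 * The for_each_tdp_pte*() macros above are built on these functions; an
 * open-coded walk (sketch) would look like:
 *
 *	tdp_iter_start(&iter, root, PG_LEVEL_4K, start);
 *	while (iter.valid && iter.gfn < end) {
 *		...
 *		tdp_iter_next(&iter);
 *	}
 *
 * tdp_iter_restart() re-walks from the root towards next_last_level_gfn,
 * e.g. after the walk yielded and mmu_lock was dropped.
 */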

#endif /* __KVM_X86_MMU_TDP_ITER_H */