// SPDX-License-Identifier: GPL-2.0
// Copyright(c) 2018 Intel Corporation. All rights reserved.

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mmzone.h>
#include <linux/random.h>
#include <linux/moduleparam.h>
#include "internal.h"
#include "shuffle.h"

DEFINE_STATIC_KEY_FALSE(page_alloc_shuffle_key);

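/*
 * Consumers test this key with static_branch_unlikely() so that the
 * disabled case costs a patched-out branch rather than a load and
 * compare. A minimal sketch of the gate as it appears in shuffle.h
 * (paraphrased, not copied verbatim from this revision):
 *
 *	static inline bool is_shuffle_order(int order)
 *	{
 *		if (!static_branch_unlikely(&page_alloc_shuffle_key))
 *			return false;
 *		return order >= SHUFFLE_ORDER;
 *	}
 */
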
static bool shuffle_param;

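/*
 * Parse the boolean and, when it is set true, enable the static key.
 * Note the key is only ever enabled here; writing the parameter as
 * false later does not clear it again.
 */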
static __meminit int shuffle_param_set(const char *val,
		const struct kernel_param *kp)
{
	if (param_set_bool(val, kp))
		return -EINVAL;
	if (*(bool *)kp->arg)
		static_branch_enable(&page_alloc_shuffle_key);
	return 0;
}

static const struct kernel_param_ops shuffle_param_ops = {
	.set = shuffle_param_set,
	.get = param_get_bool,
};
module_param_cb(shuffle, &shuffle_param_ops, &shuffle_param, 0400);

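/*
 * Example usage, assuming the upstream build where this file is linked
 * into a composite "page_alloc" object, which gives the parameter its
 * namespace: enable shuffling on the kernel command line with
 *
 *	page_alloc.shuffle=1
 *
 * and read it back at runtime via sysfs (prints Y or N; the 0400 mode
 * makes it readable by root only, and writable by no one):
 *
 *	# cat /sys/module/page_alloc/parameters/shuffle
 */
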
/*
 * For two pages to be swapped in the shuffle, they must be free (on a
 * 'free_area' lru), have the same order, and have the same migratetype.
 */
static struct page * __meminit shuffle_valid_page(struct zone *zone,
						  unsigned long pfn, int order)
{
	struct page *page = pfn_to_online_page(pfn);

	/*
	 * Given we're dealing with randomly selected pfns in a zone we
	 * need to ask questions like...
	 */

	/* ... is the page managed by the buddy? */
	if (!page)
		return NULL;

	/* ... is the page assigned to the same zone? */
	if (page_zone(page) != zone)
		return NULL;

	/* ... is the page free and currently on a free_area list? */
	if (!PageBuddy(page))
		return NULL;

	/*
	 * ... is the page on the same list as the page we will
	 * shuffle it with?
	 */
	if (buddy_order(page) != order)
		return NULL;

	return page;
}

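/*
 * Illustrative use (hypothetical caller, mirroring __shuffle_zone()
 * below): with zone->lock held, probe an order-aligned pfn and treat
 * it as a swap candidate only if every check above passed:
 *
 *	// NULL means a hole, foreign zone, in-use page, or wrong order
 *	page = shuffle_valid_page(zone, pfn, SHUFFLE_ORDER);
 *	if (!page)
 *		continue;
 */
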
/*
 * Fisher-Yates shuffle the freelist: iterate through an array (pfns,
 * in this case) and randomly swap each entry with another in the span
 * end_pfn - start_pfn. A textbook form of the algorithm is sketched in
 * a comment after the function for contrast.
 *
 * To keep the implementation simple it does not attempt to correct for
 * sources of bias in the distribution, like modulo bias or pseudo-random
 * number generator bias. That is, this shuffling is expected to raise the
 * bar for attacks that exploit the predictability of page allocations,
 * but it need not be a perfect shuffle.
 */
#define SHUFFLE_RETRY 10
void __meminit __shuffle_zone(struct zone *z)
{
	unsigned long i, flags;
	unsigned long start_pfn = z->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(z);
	const int order = SHUFFLE_ORDER;
	const int order_pages = 1 << order;

	spin_lock_irqsave(&z->lock, flags);
	start_pfn = ALIGN(start_pfn, order_pages);
	for (i = start_pfn; i < end_pfn; i += order_pages) {
		unsigned long j;
		int migratetype, retry;
		struct page *page_i, *page_j;

		/*
		 * We expect page_i, in the sub-range of a zone being added
		 * (@start_pfn to @end_pfn), to be more likely valid than
		 * page_j, which is randomly selected from the whole span
		 * @zone_start_pfn to @spanned_pages.
		 */
		page_i = shuffle_valid_page(z, i, order);
		if (!page_i)
			continue;

		for (retry = 0; retry < SHUFFLE_RETRY; retry++) {
			/*
			 * Pick a random, order-aligned page in the zone span
			 * as a swap target. If the selected pfn is a hole or
			 * otherwise invalid, retry up to SHUFFLE_RETRY times
			 * to find a valid pfn in the zone.
			 */
			j = z->zone_start_pfn +
				ALIGN_DOWN(get_random_long() % z->spanned_pages,
						order_pages);
			page_j = shuffle_valid_page(z, j, order);
			if (page_j && page_j != page_i)
				break;
		}
		if (retry >= SHUFFLE_RETRY) {
			pr_debug("%s: failed to swap %#lx\n", __func__, i);
			continue;
		}

		/*
		 * Each migratetype corresponds to its own list; make sure the
		 * types match, otherwise we're moving pages to lists where
		 * they do not belong.
		 */
		migratetype = get_pageblock_migratetype(page_i);
		if (get_pageblock_migratetype(page_j) != migratetype) {
			pr_debug("%s: migratetype mismatch %#lx\n", __func__, i);
			continue;
		}

		list_swap(&page_i->lru, &page_j->lru);

		pr_debug("%s: swap: %#lx -> %#lx\n", __func__, i, j);

		/* take it easy on the zone lock */
		if ((i % (100 * order_pages)) == 0) {
			spin_unlock_irqrestore(&z->lock, flags);
			cond_resched();
			spin_lock_irqsave(&z->lock, flags);
		}
	}
	spin_unlock_irqrestore(&z->lock, flags);
}

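/*
 * For contrast with the loop above, an illustrative sketch of the
 * textbook in-place Fisher-Yates shuffle over a dense array (not part
 * of this file; shown only to make the analogy concrete):
 *
 *	static void fisher_yates(unsigned long *arr, unsigned long n)
 *	{
 *		unsigned long i, j, tmp;
 *
 *		for (i = n - 1; i > 0; i--) {
 *			j = get_random_long() % (i + 1);
 *			tmp = arr[i];
 *			arr[i] = arr[j];
 *			arr[j] = tmp;
 *		}
 *	}
 *
 * __shuffle_zone() differs in that its "array" is the sparse set of
 * order-aligned pfns, invalid entries are skipped rather than swapped,
 * and the swap target j is drawn from the whole zone span each time.
 */
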
/**
 * __shuffle_free_memory - reduce the predictability of the page allocator
 * @pgdat: node page data
 */
void __meminit __shuffle_free_memory(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		shuffle_zone(z);
}

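/*
 * This is not called directly; shuffle.h provides static-key gated
 * wrappers. A sketch of the wrapper (paraphrased from shuffle.h, not
 * copied verbatim from this revision):
 *
 *	static inline void __meminit shuffle_free_memory(pg_data_t *pgdat)
 *	{
 *		if (!static_branch_unlikely(&page_alloc_shuffle_key))
 *			return;
 *		__shuffle_free_memory(pgdat);
 *	}
 */
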
bool shuffle_pick_tail(void)
{
	static u64 rand;
	static u8 rand_bits;
	bool ret;

	/*
	 * The lack of locking is deliberate. If 2 threads race to
	 * update the rand state it just adds to the entropy.
	 */
	if (rand_bits == 0) {
		rand_bits = 64;
		rand = get_random_u64();
	}

	ret = rand & 1;

	rand_bits--;
	rand >>= 1;

	return ret;
}

183