// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017 Joe Lawrence <joe.lawrence@redhat.com>
 */

/*
 * livepatch-shadow-mod.c - Shadow variables, buggy module demo
 *
 * Purpose
 * -------
 *
 * As a demonstration of the livepatch shadow variable API, this module
 * introduces memory leak behavior that the livepatch modules
 * livepatch-shadow-fix1.ko and livepatch-shadow-fix2.ko correct and
 * enhance.
 *
 * WARNING - even though the livepatch-shadow-fix modules patch the
 * memory leak, please load these modules at your own risk -- some
 * amount of memory may be leaked before the bug is patched.
 *
 *
 * Usage
 * -----
 *
 * Step 1 - Load the buggy demonstration module:
 *
 *   insmod samples/livepatch/livepatch-shadow-mod.ko
 *
 * Watch dmesg output for a few moments to see new dummies being
 * allocated and a periodic cleanup check.  (Note: a small amount of
 * memory is being leaked.)
 *
 *
 * Step 2 - Load livepatch fix1:
 *
 *   insmod samples/livepatch/livepatch-shadow-fix1.ko
 *
 * Continue watching dmesg and note that livepatch_fix1_dummy_free()
 * and livepatch_fix1_dummy_alloc() now log messages about leaked
 * memory and, eventually, about leaks prevented.  (A rough sketch of
 * the shadow variable calls involved follows this comment block.)
 *
 *
 * Step 3 - Load livepatch fix2 (on top of fix1):
 *
 *   insmod samples/livepatch/livepatch-shadow-fix2.ko
 *
 * This module extends functionality through shadow variables: a new
 * "check" counter is attached to each dummy structure.  Periodic dmesg
 * messages report these counters as dummies are cleaned up.
 *
 *
 * Step 4 - Cleanup
 *
 * Unwind the demonstration by disabling the livepatch fix modules, then
 * removing them and the demo module:
 *
 *   echo 0 > /sys/kernel/livepatch/livepatch_shadow_fix2/enabled
 *   echo 0 > /sys/kernel/livepatch/livepatch_shadow_fix1/enabled
 *   rmmod livepatch-shadow-fix2
 *   rmmod livepatch-shadow-fix1
 *   rmmod livepatch-shadow-mod
 */
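
/*
 * For reference only, a minimal sketch of how a fix module such as
 * livepatch-shadow-fix1 could use the shadow variable API declared in
 * <linux/livepatch.h> to remember the otherwise-lost "leak" pointer.
 * SHADOW_LEAK_ID and shadow_leak_ctor() are illustrative names, not
 * part of this module, and the exact klp_shadow_*() signatures vary
 * by kernel version:
 *
 *	// In the patched dummy_alloc(), after allocating "leak",
 *	// associate it with the dummy it belongs to:
 *	klp_shadow_alloc(d, SHADOW_LEAK_ID, sizeof(leak), GFP_KERNEL,
 *			 shadow_leak_ctor, &leak);
 *
 *	// shadow_leak_ctor() would simply copy the leak pointer into
 *	// the shadow data so a patched dummy_free() can find it later.
 *
 * See the livepatch-shadow-fix1 source for the real implementation.
 */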


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/workqueue.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
MODULE_DESCRIPTION("Buggy module for shadow variable demo");

/* Allocate new dummies every second */
#define ALLOC_PERIOD	1
/* Check for expired dummies after a few new ones have been allocated */
#define CLEANUP_PERIOD	(3 * ALLOC_PERIOD)
/* Dummies expire after a few cleanup instances */
#define EXPIRE_PERIOD	(4 * CLEANUP_PERIOD)
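
/*
 * With the values above: new dummies are allocated every 1 second,
 * the cleanup check runs every 3 seconds, and each dummy expires
 * 12 seconds (4 * 3 * 1) after it is allocated.
 */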

/*
 * Keep a list of all the dummies so we can clean up any residual ones
 * on module exit
 */
static LIST_HEAD(dummy_list);
static DEFINE_MUTEX(dummy_list_mutex);

struct dummy {
	struct list_head list;
	unsigned long jiffies_expire;
};

static __used noinline struct dummy *dummy_alloc(void)
{
	struct dummy *d;
	int *leak;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return NULL;

	d->jiffies_expire = jiffies +
		msecs_to_jiffies(1000 * EXPIRE_PERIOD);

	/* Oops, forgot to save leak! */
	leak = kzalloc(sizeof(*leak), GFP_KERNEL);
	if (!leak) {
		kfree(d);
		return NULL;
	}

	pr_info("%s: dummy @ %p, expires @ %lx\n",
		__func__, d, d->jiffies_expire);

	return d;
}

static __used noinline void dummy_free(struct dummy *d)
{
	pr_info("%s: dummy @ %p, expired = %lx\n",
		__func__, d, d->jiffies_expire);

	kfree(d);
}
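
/*
 * A fix module's replacement for dummy_free() could, roughly, look up
 * and release the shadow copy of "leak" before freeing the dummy.
 * This is only an illustrative sketch; SHADOW_LEAK_ID is a made-up
 * identifier and the klp_shadow_*() signatures depend on the kernel
 * version:
 *
 *	int **shadow_leak = klp_shadow_get(d, SHADOW_LEAK_ID);
 *
 *	if (shadow_leak)
 *		kfree(*shadow_leak);	// free the once-leaked memory
 *	klp_shadow_free(d, SHADOW_LEAK_ID, NULL);
 *	kfree(d);
 */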

static __used noinline bool dummy_check(struct dummy *d,
					unsigned long jiffies)
{
	return time_after(jiffies, d->jiffies_expire);
}
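
/*
 * livepatch-shadow-fix2 extends dummy_check() in a similar spirit by
 * attaching a second shadow variable that counts how many times each
 * dummy has been checked.  A rough sketch, with SHADOW_COUNT_ID as an
 * illustrative identifier only:
 *
 *	int *shadow_count;
 *
 *	shadow_count = klp_shadow_get_or_alloc(d, SHADOW_COUNT_ID,
 *					       sizeof(*shadow_count),
 *					       GFP_NOWAIT, NULL, NULL);
 *	if (shadow_count)
 *		(*shadow_count)++;
 */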

/*
 * alloc_work_func: allocates a new dummy structure plus additional
 *                  memory, aptly named "leak", but keeps no permanent
 *                  record of the latter.
 */

static void alloc_work_func(struct work_struct *work);
static DECLARE_DELAYED_WORK(alloc_dwork, alloc_work_func);

static void alloc_work_func(struct work_struct *work)
{
	struct dummy *d;

	d = dummy_alloc();
	if (!d)
		return;

	mutex_lock(&dummy_list_mutex);
	list_add(&d->list, &dummy_list);
	mutex_unlock(&dummy_list_mutex);

	schedule_delayed_work(&alloc_dwork,
		msecs_to_jiffies(1000 * ALLOC_PERIOD));
}

/*
 * cleanup_work_func: frees dummy structures.  Without knowledge of
 *                    "leak", it leaks the additional memory that
 *                    alloc_work_func created.
 */

static void cleanup_work_func(struct work_struct *work);
static DECLARE_DELAYED_WORK(cleanup_dwork, cleanup_work_func);

static void cleanup_work_func(struct work_struct *work)
{
	struct dummy *d, *tmp;
	unsigned long j;

	j = jiffies;
	pr_info("%s: jiffies = %lx\n", __func__, j);

	mutex_lock(&dummy_list_mutex);
	list_for_each_entry_safe(d, tmp, &dummy_list, list) {

		/* Kick out and free any expired dummies */
		if (dummy_check(d, j)) {
			list_del(&d->list);
			dummy_free(d);
		}
	}
	mutex_unlock(&dummy_list_mutex);

	schedule_delayed_work(&cleanup_dwork,
		msecs_to_jiffies(1000 * CLEANUP_PERIOD));
}

static int livepatch_shadow_mod_init(void)
{
	schedule_delayed_work(&alloc_dwork,
		msecs_to_jiffies(1000 * ALLOC_PERIOD));
	schedule_delayed_work(&cleanup_dwork,
		msecs_to_jiffies(1000 * CLEANUP_PERIOD));

	return 0;
}

static void livepatch_shadow_mod_exit(void)
{
	struct dummy *d, *tmp;

	/* Wait for any dummies at work */
	cancel_delayed_work_sync(&alloc_dwork);
	cancel_delayed_work_sync(&cleanup_dwork);

	/* Cleanup residual dummies */
	list_for_each_entry_safe(d, tmp, &dummy_list, list) {
		list_del(&d->list);
		dummy_free(d);
	}
}
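
/*
 * Note that the fix modules are responsible for their own shadow
 * variable cleanup: before or while being removed they would typically
 * release any remaining <obj, id> pairs they created, for example with
 * something like klp_shadow_free_all(SHADOW_LEAK_ID, dtor) (again, an
 * illustrative sketch; see the livepatch-shadow-fix sources).
 */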

module_init(livepatch_shadow_mod_init);
module_exit(livepatch_shadow_mod_exit);