// SPDX-License-Identifier: GPL-2.0-or-later

#define pr_fmt(fmt) "ref_tracker: " fmt

#include <linux/export.h>
#include <linux/list_sort.h>
#include <linux/ref_tracker.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>

#define REF_TRACKER_STACK_ENTRIES 16
#define STACK_BUF_SIZE 1024

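/*
 * One tracked reference. Live entries sit on dir->list; once released
 * they are moved to dir->quarantine, keeping both the allocation and
 * the free stack handles so double releases can be reported in full.
 */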
struct ref_tracker {
	struct list_head	head;	/* anchor into dir->list or dir->quarantine */
	bool			dead;
	depot_stack_handle_t	alloc_stack_handle;
	depot_stack_handle_t	free_stack_handle;
};

struct ref_tracker_dir_stats {
	int total;
	int count;
	struct {
		depot_stack_handle_t stack_handle;
		unsigned int count;
	} stacks[];
};

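/*
 * Snapshot the outstanding references in @dir, grouped by allocation
 * stack. At most @limit distinct stacks are recorded; the rest only
 * contribute to ->total. Called with dir->lock held.
 */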
static struct ref_tracker_dir_stats *
ref_tracker_get_stats(struct ref_tracker_dir *dir, unsigned int limit)
{
	struct ref_tracker_dir_stats *stats;
	struct ref_tracker *tracker;

	stats = kmalloc(struct_size(stats, stacks, limit),
			GFP_NOWAIT | __GFP_NOWARN);
	if (!stats)
		return ERR_PTR(-ENOMEM);
	stats->total = 0;
	stats->count = 0;

	list_for_each_entry(tracker, &dir->list, head) {
		depot_stack_handle_t stack = tracker->alloc_stack_handle;
		int i;

		++stats->total;
		for (i = 0; i < stats->count; ++i)
			if (stats->stacks[i].stack_handle == stack)
				break;
		if (i >= limit)
			continue;
		if (i >= stats->count) {
			stats->stacks[i].stack_handle = stack;
			stats->stacks[i].count = 0;
			++stats->count;
		}
		++stats->stacks[i].count;
	}

	return stats;
}

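/*
 * Minimal output sink for reports: writes into a caller-supplied
 * buffer when one is set, and falls back to the kernel log otherwise.
 */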
struct ostream {
	char *buf;
	int size, used;
};

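/* Emit one report line: snprintf() into the buffer if present, else pr_err(). */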
#define pr_ostream(stream, fmt, args...) \
({ \
	struct ostream *_s = (stream); \
\
	if (!_s->buf) { \
		pr_err(fmt, ##args); \
	} else { \
		int ret, len = _s->size - _s->used; \
		ret = snprintf(_s->buf + _s->used, len, pr_fmt(fmt), ##args); \
		_s->used += min(ret, len); \
	} \
})

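/*
 * Report up to @display_limit distinct allocation stacks for the
 * still-active references in @dir, followed by a summary line for any
 * users that were skipped. Requires dir->lock to be held.
 */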
static void
__ref_tracker_dir_pr_ostream(struct ref_tracker_dir *dir,
			     unsigned int display_limit, struct ostream *s)
{
	struct ref_tracker_dir_stats *stats;
	unsigned int i = 0, skipped;
	depot_stack_handle_t stack;
	char *sbuf;

	lockdep_assert_held(&dir->lock);

	if (list_empty(&dir->list))
		return;

	stats = ref_tracker_get_stats(dir, display_limit);
	if (IS_ERR(stats)) {
		pr_ostream(s, "%s@%pK: couldn't get stats, error %pe\n",
			   dir->name, dir, stats);
		return;
	}

	sbuf = kmalloc(STACK_BUF_SIZE, GFP_NOWAIT | __GFP_NOWARN);

	for (i = 0, skipped = stats->total; i < stats->count; ++i) {
		stack = stats->stacks[i].stack_handle;
		if (sbuf && !stack_depot_snprint(stack, sbuf, STACK_BUF_SIZE, 4))
			sbuf[0] = 0;
		pr_ostream(s, "%s@%pK has %d/%d users at\n%s\n", dir->name, dir,
			   stats->stacks[i].count, stats->total, sbuf);
		skipped -= stats->stacks[i].count;
	}

	if (skipped)
		pr_ostream(s, "%s@%pK skipped reports about %d/%d users.\n",
			   dir->name, dir, skipped, stats->total);

	kfree(sbuf);

	kfree(stats);
}

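/* Print a report to the kernel log; the caller already holds dir->lock. */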
void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
				  unsigned int display_limit)
{
	struct ostream os = {};

	__ref_tracker_dir_pr_ostream(dir, display_limit, &os);
}
EXPORT_SYMBOL(ref_tracker_dir_print_locked);

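/* Same report as ref_tracker_dir_print_locked(), but takes dir->lock itself. */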
void ref_tracker_dir_print(struct ref_tracker_dir *dir,
			   unsigned int display_limit)
{
	unsigned long flags;

	spin_lock_irqsave(&dir->lock, flags);
	ref_tracker_dir_print_locked(dir, display_limit);
	spin_unlock_irqrestore(&dir->lock, flags);
}
EXPORT_SYMBOL(ref_tracker_dir_print);

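/*
 * Write the report into @buf, limited to @size bytes, and return the
 * number of characters used.
 */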
int ref_tracker_dir_snprint(struct ref_tracker_dir *dir, char *buf, size_t size)
{
	struct ostream os = { .buf = buf, .size = size };
	unsigned long flags;

	spin_lock_irqsave(&dir->lock, flags);
	__ref_tracker_dir_pr_ostream(dir, 16, &os);
	spin_unlock_irqrestore(&dir->lock, flags);

	return os.used;
}
EXPORT_SYMBOL(ref_tracker_dir_snprint);

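/*
 * Tear down @dir: drop the quarantined entries, then report, free and
 * WARN about any references that were never released.
 */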
void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
{
	struct ref_tracker *tracker, *n;
	unsigned long flags;
	bool leak = false;

	dir->dead = true;
	spin_lock_irqsave(&dir->lock, flags);
	list_for_each_entry_safe(tracker, n, &dir->quarantine, head) {
		list_del(&tracker->head);
		kfree(tracker);
		dir->quarantine_avail++;
	}
	if (!list_empty(&dir->list)) {
		ref_tracker_dir_print_locked(dir, 16);
		leak = true;
		list_for_each_entry_safe(tracker, n, &dir->list, head) {
			list_del(&tracker->head);
			kfree(tracker);
		}
	}
	spin_unlock_irqrestore(&dir->lock, flags);
	WARN_ON_ONCE(leak);
	WARN_ON_ONCE(refcount_read(&dir->untracked) != 1);
	WARN_ON_ONCE(refcount_read(&dir->no_tracker) != 1);
}
EXPORT_SYMBOL(ref_tracker_dir_exit);

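/*
 * Take a reference on @dir. When @trackerp is non-NULL a ref_tracker is
 * allocated and the caller's stack trace is saved as the allocation
 * site; a NULL @trackerp only bumps the dir->no_tracker count.
 */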
int ref_tracker_alloc(struct ref_tracker_dir *dir,
		      struct ref_tracker **trackerp,
		      gfp_t gfp)
{
	unsigned long entries[REF_TRACKER_STACK_ENTRIES];
	struct ref_tracker *tracker;
	unsigned int nr_entries;
	gfp_t gfp_mask = gfp | __GFP_NOWARN;
	unsigned long flags;

	WARN_ON_ONCE(dir->dead);

	if (!trackerp) {
		refcount_inc(&dir->no_tracker);
		return 0;
	}
	if (gfp & __GFP_DIRECT_RECLAIM)
		gfp_mask |= __GFP_NOFAIL;
	*trackerp = tracker = kzalloc(sizeof(*tracker), gfp_mask);
	if (unlikely(!tracker)) {
		pr_err_once("memory allocation failure, unreliable refcount tracker.\n");
		refcount_inc(&dir->untracked);
		return -ENOMEM;
	}
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	tracker->alloc_stack_handle = stack_depot_save(entries, nr_entries, gfp);

	spin_lock_irqsave(&dir->lock, flags);
	list_add(&tracker->head, &dir->list);
	spin_unlock_irqrestore(&dir->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(ref_tracker_alloc);

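/*
 * Release a reference taken with ref_tracker_alloc(). The freeing stack
 * is recorded and the entry is parked on dir->quarantine (bounded by
 * dir->quarantine_avail) so that a later double release can be reported
 * with both stack traces.
 */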
int ref_tracker_free(struct ref_tracker_dir *dir,
		     struct ref_tracker **trackerp)
{
	unsigned long entries[REF_TRACKER_STACK_ENTRIES];
	depot_stack_handle_t stack_handle;
	struct ref_tracker *tracker;
	unsigned int nr_entries;
	unsigned long flags;

	WARN_ON_ONCE(dir->dead);

	if (!trackerp) {
		refcount_dec(&dir->no_tracker);
		return 0;
	}
	tracker = *trackerp;
	if (!tracker) {
		refcount_dec(&dir->untracked);
		return -EEXIST;
	}
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	stack_handle = stack_depot_save(entries, nr_entries,
					GFP_NOWAIT | __GFP_NOWARN);

	spin_lock_irqsave(&dir->lock, flags);
	if (tracker->dead) {
		pr_err("reference already released.\n");
		if (tracker->alloc_stack_handle) {
			pr_err("allocated in:\n");
			stack_depot_print(tracker->alloc_stack_handle);
		}
		if (tracker->free_stack_handle) {
			pr_err("freed in:\n");
			stack_depot_print(tracker->free_stack_handle);
		}
		spin_unlock_irqrestore(&dir->lock, flags);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	tracker->dead = true;

	tracker->free_stack_handle = stack_handle;

	list_move_tail(&tracker->head, &dir->quarantine);
	if (!dir->quarantine_avail) {
		tracker = list_first_entry(&dir->quarantine, struct ref_tracker, head);
		list_del(&tracker->head);
	} else {
		dir->quarantine_avail--;
		tracker = NULL;
	}
	spin_unlock_irqrestore(&dir->lock, flags);

	kfree(tracker);
	return 0;
}
EXPORT_SYMBOL_GPL(ref_tracker_free);