// SPDX-License-Identifier: GPL-2.0
/*
 * kernel/bpf/bpf_task_storage.c (revision 249592bf)
 *
 * Copyright (c) 2020 Facebook
 * Copyright 2020 Google LLC.
 */

#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/bpf_local_storage.h>
#include <linux/filter.h>
#include <uapi/linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/fdtable.h>

DEFINE_BPF_STORAGE_CACHE(task_cache);

static DEFINE_PER_CPU(int, bpf_task_storage_busy);

static void bpf_task_storage_lock(void)
{
	migrate_disable();
	__this_cpu_inc(bpf_task_storage_busy);
}

static void bpf_task_storage_unlock(void)
{
	__this_cpu_dec(bpf_task_storage_busy);
	migrate_enable();
}

static bool bpf_task_storage_trylock(void)
{
	migrate_disable();
	if (unlikely(__this_cpu_inc_return(bpf_task_storage_busy) != 1)) {
		__this_cpu_dec(bpf_task_storage_busy);
		migrate_enable();
		return false;
	}
	return true;
}
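
/*
 * Note on the helpers above: bpf_task_storage_busy is a per-CPU recursion
 * guard rather than a real lock.  Map syscalls use the unconditional
 * lock/unlock pair, while the BPF-side helpers below use
 * bpf_task_storage_trylock() so that a program which re-enters task
 * storage on the same CPU (e.g. a tracing program firing inside the
 * locked region) backs off with NULL/-EBUSY instead of deadlocking on
 * local_storage->lock.
 */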

static struct bpf_local_storage __rcu **task_storage_ptr(void *owner)
{
	struct task_struct *task = owner;

	return &task->bpf_storage;
}

static struct bpf_local_storage_data *
task_storage_lookup(struct task_struct *task, struct bpf_map *map,
		    bool cacheit_lockit)
{
	struct bpf_local_storage *task_storage;
	struct bpf_local_storage_map *smap;

	task_storage = rcu_dereference(task->bpf_storage);
	if (!task_storage)
		return NULL;

	smap = (struct bpf_local_storage_map *)map;
	return bpf_local_storage_lookup(task_storage, smap, cacheit_lockit);
}

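/* Release every piece of task-local storage attached to @task.  Called
 * when the task is being torn down, i.e. no BPF program or map syscall
 * can be touching this storage concurrently any more.
 */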
void bpf_task_storage_free(struct task_struct *task)
{
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage *local_storage;
	bool free_task_storage = false;
	struct hlist_node *n;
	unsigned long flags;

	rcu_read_lock();

	local_storage = rcu_dereference(task->bpf_storage);
	if (!local_storage) {
		rcu_read_unlock();
		return;
	}

	/* Neither the bpf_prog nor the bpf-map's syscall
	 * could be modifying the local_storage->list now.
	 * Thus, no elem can be added to or deleted from the
	 * local_storage->list by the bpf_prog or by the bpf-map's syscall.
	 *
	 * It only races with bpf_local_storage_map_free() when
	 * unlinking elem from the local_storage->list and
	 * the map's bucket->list.
	 */
	bpf_task_storage_lock();
	raw_spin_lock_irqsave(&local_storage->lock, flags);
	hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
		/* Always unlink from map before unlinking from
		 * local_storage.
		 */
		bpf_selem_unlink_map(selem);
		free_task_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, false);
	}
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	bpf_task_storage_unlock();
	rcu_read_unlock();

	/* free_task_storage should always be true as long as
	 * local_storage->list was non-empty.
	 */
	if (free_task_storage)
		kfree_rcu(local_storage, rcu);
}

static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_local_storage_data *sdata;
	struct task_struct *task;
	unsigned int f_flags;
	struct pid *pid;
	int fd, err;

	fd = *(int *)key;
	pid = pidfd_get_pid(fd, &f_flags);
	if (IS_ERR(pid))
		return ERR_CAST(pid);

	/* We should be in an RCU read side critical section, so it is
	 * safe to call pid_task.
	 */
	WARN_ON_ONCE(!rcu_read_lock_held());
	task = pid_task(pid, PIDTYPE_PID);
	if (!task) {
		err = -ENOENT;
		goto out;
	}

	bpf_task_storage_lock();
	sdata = task_storage_lookup(task, map, true);
	bpf_task_storage_unlock();
	put_pid(pid);
	return sdata ? sdata->data : NULL;
out:
	put_pid(pid);
	return ERR_PTR(err);
}
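
/* Userspace sketch (not part of this file): for this map type the key is
 * a pidfd, not a pid.  Assuming libbpf and a pidfd from pidfd_open(2), a
 * lookup/update could look roughly like:
 *
 *	int pidfd = syscall(SYS_pidfd_open, target_pid, 0);
 *	struct my_value val = { ... };
 *
 *	bpf_map_update_elem(map_fd, &pidfd, &val, BPF_ANY);
 *	bpf_map_lookup_elem(map_fd, &pidfd, &val);
 *
 * "target_pid", "map_fd" and "struct my_value" are illustrative names
 * only; error handling is omitted.
 */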

static int bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key,
					    void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;
	struct task_struct *task;
	unsigned int f_flags;
	struct pid *pid;
	int fd, err;

	fd = *(int *)key;
	pid = pidfd_get_pid(fd, &f_flags);
	if (IS_ERR(pid))
		return PTR_ERR(pid);

	/* We should be in an RCU read side critical section, so it is
	 * safe to call pid_task.
	 */
	WARN_ON_ONCE(!rcu_read_lock_held());
	task = pid_task(pid, PIDTYPE_PID);
	if (!task) {
		err = -ENOENT;
		goto out;
	}

	bpf_task_storage_lock();
	sdata = bpf_local_storage_update(
		task, (struct bpf_local_storage_map *)map, value, map_flags);
	bpf_task_storage_unlock();

	err = PTR_ERR_OR_ZERO(sdata);
out:
	put_pid(pid);
	return err;
}

static int task_storage_delete(struct task_struct *task, struct bpf_map *map)
{
	struct bpf_local_storage_data *sdata;

	sdata = task_storage_lookup(task, map, false);
	if (!sdata)
		return -ENOENT;

	bpf_selem_unlink(SELEM(sdata));

	return 0;
}

static int bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct task_struct *task;
	unsigned int f_flags;
	struct pid *pid;
	int fd, err;

	fd = *(int *)key;
	pid = pidfd_get_pid(fd, &f_flags);
	if (IS_ERR(pid))
		return PTR_ERR(pid);

	/* We should be in an RCU read side critical section, so it is
	 * safe to call pid_task.
	 */
	WARN_ON_ONCE(!rcu_read_lock_held());
	task = pid_task(pid, PIDTYPE_PID);
	if (!task) {
		err = -ENOENT;
		goto out;
	}

	bpf_task_storage_lock();
	err = task_storage_delete(task, map);
	bpf_task_storage_unlock();
out:
	put_pid(pid);
	return err;
}

BPF_CALL_4(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
	   task, void *, value, u64, flags)
{
	struct bpf_local_storage_data *sdata;

	if (flags & ~(BPF_LOCAL_STORAGE_GET_F_CREATE))
		return (unsigned long)NULL;

	if (!task)
		return (unsigned long)NULL;

	if (!bpf_task_storage_trylock())
		return (unsigned long)NULL;

	sdata = task_storage_lookup(task, map, true);
	if (sdata)
		goto unlock;

	/* Only allocate new storage when the task is refcounted. */
	if (refcount_read(&task->usage) &&
	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE))
		sdata = bpf_local_storage_update(
			task, (struct bpf_local_storage_map *)map, value,
			BPF_NOEXIST);

unlock:
	bpf_task_storage_unlock();
	return IS_ERR_OR_NULL(sdata) ? (unsigned long)NULL :
		(unsigned long)sdata->data;
}
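
/* BPF program sketch (not part of this file): with a task storage map
 * "task_map" (illustrative, see the map definition sketch at the end of
 * this file), a tracing or LSM program could do:
 *
 *	struct my_data *d;
 *
 *	d = bpf_task_storage_get(&task_map, bpf_get_current_task_btf(), 0,
 *				 BPF_LOCAL_STORAGE_GET_F_CREATE);
 *	if (d)
 *		d->counter++;
 *
 * "struct my_data" and "counter" are illustrative.  NULL is returned when
 * the flags are invalid, the trylock above fails, or allocation fails.
 */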

BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
	   task)
{
	int ret;

	if (!task)
		return -EINVAL;

	if (!bpf_task_storage_trylock())
		return -EBUSY;

	/* This helper must only be called from places where the lifetime of
	 * the task is guaranteed, either because the task is refcounted or
	 * because it is protected by an RCU read-side critical section.
	 */
	ret = task_storage_delete(task, map);
	bpf_task_storage_unlock();
	return ret;
}
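
/* BPF program sketch (not part of this file): pairing with the get example
 * above, the current task's entry can be dropped with
 *
 *	bpf_task_storage_delete(&task_map, bpf_get_current_task_btf());
 *
 * The current task is valid for the duration of the program, so the
 * lifetime requirement described above is satisfied.
 */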

static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	return -ENOTSUPP;
}

static struct bpf_map *task_storage_map_alloc(union bpf_attr *attr)
{
	struct bpf_local_storage_map *smap;

	smap = bpf_local_storage_map_alloc(attr);
	if (IS_ERR(smap))
		return ERR_CAST(smap);

	smap->cache_idx = bpf_local_storage_cache_idx_get(&task_cache);
	return &smap->map;
}

static void task_storage_map_free(struct bpf_map *map)
{
	struct bpf_local_storage_map *smap;

	smap = (struct bpf_local_storage_map *)map;
	bpf_local_storage_cache_idx_free(&task_cache, smap->cache_idx);
	bpf_local_storage_map_free(smap, &bpf_task_storage_busy);
}

static int task_storage_map_btf_id;
const struct bpf_map_ops task_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = task_storage_map_alloc,
	.map_free = task_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_pid_task_storage_lookup_elem,
	.map_update_elem = bpf_pid_task_storage_update_elem,
	.map_delete_elem = bpf_pid_task_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_btf_name = "bpf_local_storage_map",
	.map_btf_id = &task_storage_map_btf_id,
	.map_owner_storage_ptr = task_storage_ptr,
};

BTF_ID_LIST_SINGLE(bpf_task_storage_btf_ids, struct, task_struct)

const struct bpf_func_proto bpf_task_storage_get_proto = {
	.func = bpf_task_storage_get,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID,
	.arg2_btf_id = &bpf_task_storage_btf_ids[0],
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_task_storage_delete_proto = {
	.func = bpf_task_storage_delete,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID,
	.arg2_btf_id = &bpf_task_storage_btf_ids[0],
};
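
/* BPF program sketch (not part of this file): the map behind the examples
 * above could be declared with libbpf-style map definitions, roughly:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__type(key, int);
 *		__type(value, struct my_data);
 *	} task_map SEC(".maps");
 *
 * Task storage maps require BPF_F_NO_PREALLOC and a 4-byte (int) key;
 * "task_map" and "struct my_data" are illustrative names.
 */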