// SPDX-License-Identifier: GPL-2.0
/* XSKMAP used for AF_XDP sockets
 * Copyright(c) 2018 Intel Corporation.
 */
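
/* An XSKMAP is an array map: its u32 keys are typically NIC queue ids and
 * its entries are AF_XDP sockets. XDP programs redirect frames into those
 * sockets with bpf_redirect_map(); user space installs a socket by writing
 * its file descriptor into the map with bpf_map_update_elem().
 */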

#include <linux/bpf.h>
#include <linux/capability.h>
#include <net/xdp_sock.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "xsk.h"

/* Grab/drop a reference on the underlying bpf_map, so the map cannot go
 * away while a socket still has entries in it.
 */
int xsk_map_inc(struct xsk_map *map)
{
	bpf_map_inc(&map->map);
	return 0;
}

void xsk_map_put(struct xsk_map *map)
{
	bpf_map_put(&map->map);
}

/* Each socket that occupies a map slot is tracked via an xsk_map_node on
 * the socket's map_list, so the slot can be cleared when the socket is
 * released. Each node also pins the map with a reference of its own.
 */
static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
					       struct xdp_sock **map_entry)
{
	struct xsk_map_node *node;
	int err;

	node = kzalloc(sizeof(*node), GFP_ATOMIC | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	err = xsk_map_inc(map);
	if (err) {
		kfree(node);
		return ERR_PTR(err);
	}

	node->map = map;
	node->map_entry = map_entry;
	return node;
}

static void xsk_map_node_free(struct xsk_map_node *node)
{
	xsk_map_put(node->map);
	kfree(node);
}

static void xsk_map_sock_add(struct xdp_sock *xs, struct xsk_map_node *node)
{
	spin_lock_bh(&xs->map_list_lock);
	list_add_tail(&node->node, &xs->map_list);
	spin_unlock_bh(&xs->map_list_lock);
}

/* Drop every node on the socket's map_list that refers to this map slot. */
static void xsk_map_sock_delete(struct xdp_sock *xs,
				struct xdp_sock **map_entry)
{
	struct xsk_map_node *n, *tmp;

	spin_lock_bh(&xs->map_list_lock);
	list_for_each_entry_safe(n, tmp, &xs->map_list, node) {
		if (map_entry == n->map_entry) {
			list_del(&n->node);
			xsk_map_node_free(n);
		}
	}
	spin_unlock_bh(&xs->map_list_lock);
}

static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
{
	struct bpf_map_memory mem;
	int err, numa_node;
	struct xsk_map *m;
	u64 size;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 ||
	    attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
		return ERR_PTR(-EINVAL);

	numa_node = bpf_map_attr_numa_node(attr);
	size = struct_size(m, xsk_map, attr->max_entries);

	err = bpf_map_charge_init(&mem, size);
	if (err < 0)
		return ERR_PTR(err);

	m = bpf_map_area_alloc(size, numa_node);
	if (!m) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	bpf_map_init_from_attr(&m->map, attr);
	bpf_map_charge_move(&m->map.memory, &mem);
	spin_lock_init(&m->lock);

	return &m->map;
}
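
/* A minimal user-space creation sketch (not part of this file; assumes the
 * raw bpf(2) syscall). Keys and values must both be 4 bytes, matching the
 * checks above, and the caller needs CAP_NET_ADMIN:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_XSKMAP,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u32),
 *		.max_entries = 64,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */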

static void xsk_map_free(struct bpf_map *map)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);

	bpf_clear_redirect_map(map);
	/* Wait for outstanding XDP programs that might still reference
	 * this map before tearing it down.
	 */
	synchronize_net();
	bpf_map_area_free(m);
}

/* Iteration helper: an out-of-range key restarts at index 0, the last
 * index reports -ENOENT, otherwise the next key is simply index + 1.
 */
static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= m->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == m->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}
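
/* User-space iteration sketch (assumes libbpf's bpf_map_get_next_key()
 * wrapper; a NULL key yields the first index):
 *
 *	__u32 key, next, *prev = NULL;
 *
 *	while (!bpf_map_get_next_key(map_fd, prev, &next)) {
 *		key = next;
 *		prev = &key;
 *	}
 */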

/* Emit the lookup inline into the calling BPF program: a bounds check on
 * the index followed by a scaled load from the xsk_map[] array, avoiding
 * a helper call per lookup.
 */
static u32 xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2;
	struct bpf_insn *insn = insn_buf;

	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(sizeof(struct xsk_sock *)));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, mp, offsetof(struct xsk_map, xsk_map));
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, mp);
	*insn++ = BPF_LDX_MEM(BPF_SIZEOF(struct xsk_sock *), ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
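
/* The sequence above is roughly this C, with r1 = map and r2 = key on
 * entry and m the containing struct xsk_map:
 *
 *	u32 index = *(u32 *)key;
 *
 *	if (index >= map->max_entries)
 *		return NULL;
 *	return m->xsk_map[index];
 */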

static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __xsk_map_lookup_elem(map, *(u32 *)key);
}

/* Lookups from the syscall side are refused: the stored value is a kernel
 * socket pointer, which must not leak to user space.
 */
static void *xsk_map_lookup_elem_sys_only(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs, *old_xs, **map_entry;
	u32 i = *(u32 *)key, fd = *(u32 *)value;
	struct xsk_map_node *node;
	struct socket *sock;
	int err;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= m->map.max_entries))
		return -E2BIG;

	/* The value is an AF_XDP socket file descriptor; resolve it and
	 * make sure the socket is bound and ready for map use.
	 */
	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return err;

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return -EOPNOTSUPP;
	}

	xs = (struct xdp_sock *)sock->sk;

	if (!xsk_is_setup_for_bpf_map(xs)) {
		sockfd_put(sock);
		return -EOPNOTSUPP;
	}

	/* Allocate the tracking node up front, outside the map lock. */
	map_entry = &m->xsk_map[i];
	node = xsk_map_node_alloc(m, map_entry);
	if (IS_ERR(node)) {
		sockfd_put(sock);
		return PTR_ERR(node);
	}

	spin_lock_bh(&m->lock);
	old_xs = READ_ONCE(*map_entry);
	if (old_xs == xs) {
		err = 0;
		goto out;
	} else if (old_xs && map_flags == BPF_NOEXIST) {
		err = -EEXIST;
		goto out;
	} else if (!old_xs && map_flags == BPF_EXIST) {
		err = -ENOENT;
		goto out;
	}
	xsk_map_sock_add(xs, node);
	WRITE_ONCE(*map_entry, xs);
	if (old_xs)
		xsk_map_sock_delete(old_xs, map_entry);
	spin_unlock_bh(&m->lock);
	sockfd_put(sock);
	return 0;

out:
	spin_unlock_bh(&m->lock);
	sockfd_put(sock);
	xsk_map_node_free(node);
	return err;
}
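
/* User-space update sketch (assumes libbpf's bpf_map_update_elem() wrapper;
 * xsk_fd is an already bound AF_XDP socket, queue_id the ring it serves):
 *
 *	__u32 queue_id = 0, value = xsk_fd;
 *
 *	if (bpf_map_update_elem(map_fd, &queue_id, &value, BPF_ANY))
 *		perror("bpf_map_update_elem");
 */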

static int xsk_map_delete_elem(struct bpf_map *map, void *key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *old_xs, **map_entry;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	spin_lock_bh(&m->lock);
	map_entry = &m->xsk_map[k];
	old_xs = xchg(map_entry, NULL);
	if (old_xs)
		xsk_map_sock_delete(old_xs, map_entry);
	spin_unlock_bh(&m->lock);

	return 0;
}

/* Called from socket teardown: clear the slot only if it still points at
 * this socket, as it may have been replaced by a concurrent update.
 */
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
			     struct xdp_sock **map_entry)
{
	spin_lock_bh(&map->lock);
	if (READ_ONCE(*map_entry) == xs) {
		WRITE_ONCE(*map_entry, NULL);
		xsk_map_sock_delete(xs, map_entry);
	}
	spin_unlock_bh(&map->lock);
}

const struct bpf_map_ops xsk_map_ops = {
	.map_alloc = xsk_map_alloc,
	.map_free = xsk_map_free,
	.map_get_next_key = xsk_map_get_next_key,
	.map_lookup_elem = xsk_map_lookup_elem,
	.map_gen_lookup = xsk_map_gen_lookup,
	.map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only,
	.map_update_elem = xsk_map_update_elem,
	.map_delete_elem = xsk_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};
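
/* XDP-program-side sketch (not part of this file; xsks_map is a
 * hypothetical BTF-style map definition in the program's own source):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_XSKMAP);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, __u32);
 *	} xsks_map SEC(".maps");
 *
 *	SEC("xdp")
 *	int xsk_redirect(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, 0);
 *	}
 */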