/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	int ret = 0;
	u32 i;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	rcu_read_lock();
	preempt_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		bpf_cgroup_storage_set(storage);
		*retval = BPF_PROG_RUN(prog, ctx);

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}
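
/* Copy the test results (packet data, program return value and mean duration
 * in nanoseconds per run) back to the user-space buffers described by
 * @kattr/@uattr. If the supplied data_size_out is too small, the copy is
 * truncated and -ENOSPC is returned, but the full size is still reported.
 */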
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}
	return data;
}

static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(data_in, max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}
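
/* Counterpart of bpf_ctx_init(): copy the (possibly modified) context back
 * to the user-space ctx_out buffer. As with bpf_test_finish(), a truncated
 * copy still reports the full size and fails with -ENOSPC.
 */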
static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}

static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */

	if (!range_is_zero(__skb, offsetof(struct __sk_buff, priority) +
			   FIELD_SIZEOF(struct __sk_buff, priority),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetof(struct __sk_buff, cb) +
			   FIELD_SIZEOF(struct __sk_buff, cb),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->priority = __skb->priority;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->priority = skb->priority;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
}
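
/* Run an skb-based program (tc, lwt, ...) against a synthetic sk_buff built
 * from the user-supplied packet data, then copy the resulting packet and the
 * optional __sk_buff context back to user space.
 */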
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	kfree_skb(skb);
	bpf_sk_storage_free(sk);
	kfree(sk);
	kfree(ctx);
	return ret;
}

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	if (kattr->test.ctx_in || kattr->test.ctx_out)
		return -EINVAL;

	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;

	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
	if (ret)
		goto out;
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
	    xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	kfree(data);
	return ret;
}

int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys flow_keys;
	u64 time_start, time_spent = 0;
	const struct ethhdr *eth;
	u32 retval, duration;
	void *data;
	int ret;
	u32 i;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (kattr->test.ctx_in || kattr->test.ctx_out)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	rcu_read_lock();
	preempt_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size);

		if (signal_pending(current)) {
			preempt_enable();
			rcu_read_unlock();

			ret = -EINTR;
			goto out;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);

out:
	kfree(data);
	return ret;
}
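
/* Usage sketch (illustration only, not part of this file): driving the code
 * above from user space through the bpf(2) syscall. "prog_fd" is assumed to
 * be an already-loaded BPF_PROG_TYPE_SCHED_CLS program, and error handling
 * is omitted for brevity.
 *
 *	union bpf_attr attr = {};
 *	char in[64] = { 0 };	// an Ethernet frame to feed the program
 *	char out[256];		// receives the possibly rewritten packet
 *
 *	attr.test.prog_fd = prog_fd;
 *	attr.test.data_in = (__u64)(unsigned long)in;
 *	attr.test.data_size_in = sizeof(in);
 *	attr.test.data_out = (__u64)(unsigned long)out;
 *	attr.test.data_size_out = sizeof(out);
 *	attr.test.repeat = 1000;
 *	syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 *
 * On return, bpf_test_finish() has filled in attr.test.retval (the program's
 * return code), attr.test.duration (mean ns per run, averaged over "repeat"
 * iterations by bpf_test_run()) and attr.test.data_size_out (the real output
 * packet size, even if the copy into "out" was truncated with -ENOSPC).
 */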