--- test_run.c (dcb40590e69e306030e944a39d0e4bf54247fb68)
+++ test_run.c (b5a36b1e1b138285ea0df34bf96c759e1e30fafd)
 /* Copyright (c) 2017 Facebook
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
  * License as published by the Free Software Foundation.
  */
 #include <linux/bpf.h>
 #include <linux/slab.h>
 [... 14 unchanged lines hidden ...]
         bpf_cgroup_storage_set(storage);
         ret = BPF_PROG_RUN(prog, ctx);
         rcu_read_unlock();
         preempt_enable();

         return ret;
 }

-static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
-                        u32 *time)
+static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
 {
         struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
         enum bpf_cgroup_storage_type stype;
         u64 time_start, time_spent = 0;
-        u32 i;
+        u32 ret = 0, i;

         for_each_cgroup_storage_type(stype) {
                 storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
                 if (IS_ERR(storage[stype])) {
                         storage[stype] = NULL;
                         for_each_cgroup_storage_type(stype)
                                 bpf_cgroup_storage_free(storage[stype]);
                         return -ENOMEM;
                 }
         }

         if (!repeat)
                 repeat = 1;
         time_start = ktime_get_ns();
         for (i = 0; i < repeat; i++) {
-                *ret = bpf_test_run_one(prog, ctx, storage);
+                ret = bpf_test_run_one(prog, ctx, storage);
                 if (need_resched()) {
                         if (signal_pending(current))
                                 break;
                         time_spent += ktime_get_ns() - time_start;
                         cond_resched();
                         time_start = ktime_get_ns();
                 }
         }
         time_spent += ktime_get_ns() - time_start;
         do_div(time_spent, repeat);
         *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

         for_each_cgroup_storage_type(stype)
                 bpf_cgroup_storage_free(storage[stype]);

-        return 0;
+        return ret;
 }

 static int bpf_test_finish(const union bpf_attr *kattr,
                            union bpf_attr __user *uattr, const void *data,
                            u32 size, u32 retval, u32 duration)
 {
         void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
         int err = -EFAULT;
+        u32 copy_size = size;

-        if (data_out && copy_to_user(data_out, data, size))
+        /* Clamp copy if the user has provided a size hint, but copy the full
+         * buffer if not to retain old behaviour.
+         */
+        if (kattr->test.data_size_out &&
+            copy_size > kattr->test.data_size_out) {
+                copy_size = kattr->test.data_size_out;
+                err = -ENOSPC;
+        }
+
+        if (data_out && copy_to_user(data_out, data, copy_size))
                 goto out;
         if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
                 goto out;
         if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
                 goto out;
         if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
                 goto out;
-        err = 0;
+        if (err != -ENOSPC)
+                err = 0;
 out:
         return err;
 }

 static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
                            u32 headroom, u32 tailroom)
 {
         void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
 [... 65 unchanged lines hidden ...]
         __skb_put(skb, size);
         skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
         skb_reset_network_header(skb);

         if (is_l2)
                 __skb_push(skb, hh_len);
         if (is_direct_pkt_access)
                 bpf_compute_data_pointers(skb);
-        ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
-        if (ret) {
-                kfree_skb(skb);
-                kfree(sk);
-                return ret;
-        }
+        retval = bpf_test_run(prog, skb, repeat, &duration);
         if (!is_l2) {
                 if (skb_headroom(skb) < hh_len) {
                         int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

                         if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
                                 kfree_skb(skb);
                                 kfree(sk);
                                 return -ENOMEM;
 [... 30 unchanged lines hidden ...]
         xdp.data_hard_start = data;
         xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
         xdp.data_meta = xdp.data;
         xdp.data_end = xdp.data + size;

         rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
         xdp.rxq = &rxqueue->xdp_rxq;

-        ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
-        if (ret)
-                goto out;
+        retval = bpf_test_run(prog, &xdp, repeat, &duration);
         if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
             xdp.data_end != xdp.data + size)
                 size = xdp.data_end - xdp.data;
         ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
-out:
         kfree(data);
         return ret;
 }
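
For context (not part of the diff): a minimal userspace sketch of how the size hint added to bpf_test_finish() above is expected to surface through the BPF_PROG_TEST_RUN command. The program fd, the test packet, and everything around them are assumed to exist elsewhere; the helper name prog_test_run() is illustrative only.

/* Hedged sketch, not part of the kernel patch: run a loaded program once
 * over a test packet, treating a non-zero data_size_out as an output-buffer
 * size hint and ENOSPC as "the produced packet was truncated to fit it".
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int prog_test_run(int prog_fd, void *in, __u32 in_len,
                         void *out, __u32 out_len)
{
        union bpf_attr attr;
        int err;

        memset(&attr, 0, sizeof(attr));
        attr.test.prog_fd = prog_fd;
        attr.test.data_in = (__u64)(unsigned long)in;
        attr.test.data_size_in = in_len;
        attr.test.data_out = (__u64)(unsigned long)out;
        attr.test.data_size_out = out_len;   /* size hint; 0 keeps old behaviour */
        attr.test.repeat = 1;

        err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
        if (err && errno == ENOSPC)
                /* Output was clamped to out_len; data_size_out now holds the
                 * full size the program produced.
                 */
                fprintf(stderr, "output truncated, need %u bytes\n",
                        attr.test.data_size_out);
        else if (!err)
                printf("retval=%u duration=%uns out_size=%u\n",
                       attr.test.retval, attr.test.duration,
                       attr.test.data_size_out);
        return err;
}

Passing 0 in data_size_out preserves the pre-patch behaviour of copying the full buffer, which is why the clamp in bpf_test_finish() is gated on a non-zero hint.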