/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>

static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
		struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
	u32 ret;

	preempt_disable();
	rcu_read_lock();
	bpf_cgroup_storage_set(storage);
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();
	preempt_enable();

	return ret;
}

static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	u32 ret = 0, i;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		ret = bpf_test_run_one(prog, ctx, storage);
		if (need_resched()) {
			if (signal_pending(current))
				break;
			time_spent += ktime_get_ns() - time_start;
			cond_resched();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}

static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;

	if (data_out && copy_to_user(data_out, data, size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	err = 0;
out:
	return err;
}

static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}
	return data;
}

int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		return -ENOMEM;
	}

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	retval = bpf_test_run(prog, skb, repeat, &duration);
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				kfree_skb(skb);
				return -ENOMEM;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	kfree_skb(skb);
	return ret;
}

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;

	retval = bpf_test_run(prog, &xdp, repeat, &duration);
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
	    xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
	kfree(data);
	return ret;
}
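/*
 * Illustrative sketch, not part of the kernel file above: a minimal
 * user-space caller that drives bpf_prog_test_run_skb()/_xdp() through
 * the BPF_PROG_TEST_RUN command of the bpf(2) syscall.  "prog_fd" is
 * assumed to be a file descriptor of an already-loaded program of a
 * supported type; the packet buffers are placeholders supplied by the
 * caller.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int prog_test_run(int prog_fd, const void *pkt_in, __u32 len_in,
			 void *pkt_out, __u32 *retval, __u32 *duration)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	/* bpf_test_init() rejects inputs shorter than ETH_HLEN or too large
	 * to fit in one page together with head- and tailroom.
	 */
	attr.test.data_in = (unsigned long)pkt_in;
	attr.test.data_size_in = len_in;
	/* bpf_test_finish() copies the possibly rewritten packet back here;
	 * this kernel version does not cap the copy, so pkt_out must be
	 * large enough for the output packet.
	 */
	attr.test.data_out = (unsigned long)pkt_out;
	attr.test.repeat = 1000;	/* averaged into test.duration */

	if (syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr)))
		return -1;

	*retval = attr.test.retval;		/* program return code */
	*duration = attr.test.duration;		/* mean ns per run */
	return attr.test.data_size_out;		/* bytes written to pkt_out */
}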