xref: /openbmc/linux/net/bpf/test_run.c (revision 71b91a506bb05f9aef3acd57af2e835d85721942)
11cf1cae9SAlexei Starovoitov /* Copyright (c) 2017 Facebook
21cf1cae9SAlexei Starovoitov  *
31cf1cae9SAlexei Starovoitov  * This program is free software; you can redistribute it and/or
41cf1cae9SAlexei Starovoitov  * modify it under the terms of version 2 of the GNU General Public
51cf1cae9SAlexei Starovoitov  * License as published by the Free Software Foundation.
61cf1cae9SAlexei Starovoitov  */
71cf1cae9SAlexei Starovoitov #include <linux/bpf.h>
81cf1cae9SAlexei Starovoitov #include <linux/slab.h>
91cf1cae9SAlexei Starovoitov #include <linux/vmalloc.h>
101cf1cae9SAlexei Starovoitov #include <linux/etherdevice.h>
111cf1cae9SAlexei Starovoitov #include <linux/filter.h>
121cf1cae9SAlexei Starovoitov #include <linux/sched/signal.h>
132cb494a3SSong Liu #include <net/sock.h>
142cb494a3SSong Liu #include <net/tcp.h>
151cf1cae9SAlexei Starovoitov 
/*
 * bpf_test_run - execute @prog against @ctx @repeat times and time it.
 *
 * @prog:   BPF program under test
 * @ctx:    context handed to BPF_PROG_RUN (sk_buff, xdp_buff, ...)
 * @repeat: iteration count; 0 is treated as 1
 * @retval: out - program return value of the last iteration
 * @time:   out - mean duration of one run in ns, clamped to U32_MAX
 *
 * The program runs under rcu_read_lock() with preemption disabled, matching
 * the environment of a real program invocation.  Returns 0 on success,
 * -ENOMEM if cgroup storage allocation fails, -EINTR if a signal arrives
 * mid-run (the stats written so far are still reported by the caller).
 */
16df1a2cb7SStanislav Fomichev static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
17df1a2cb7SStanislav Fomichev 			u32 *retval, u32 *time)
181cf1cae9SAlexei Starovoitov {
19*71b91a50SBo YU 	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
208bad74f9SRoman Gushchin 	enum bpf_cgroup_storage_type stype;
211cf1cae9SAlexei Starovoitov 	u64 time_start, time_spent = 0;
22df1a2cb7SStanislav Fomichev 	int ret = 0;
23dcb40590SRoman Gushchin 	u32 i;
241cf1cae9SAlexei Starovoitov 
	/* Allocate one storage slot per cgroup-storage type.  On failure the
	 * failed slot is NULLed and ALL slots are passed to
	 * bpf_cgroup_storage_free() - presumably NULL entries are tolerated
	 * there, since the success path at the bottom frees the same way.
	 */
258bad74f9SRoman Gushchin 	for_each_cgroup_storage_type(stype) {
268bad74f9SRoman Gushchin 		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
278bad74f9SRoman Gushchin 		if (IS_ERR(storage[stype])) {
288bad74f9SRoman Gushchin 			storage[stype] = NULL;
298bad74f9SRoman Gushchin 			for_each_cgroup_storage_type(stype)
308bad74f9SRoman Gushchin 				bpf_cgroup_storage_free(storage[stype]);
318bad74f9SRoman Gushchin 			return -ENOMEM;
328bad74f9SRoman Gushchin 		}
338bad74f9SRoman Gushchin 	}
34f42ee093SRoman Gushchin 
351cf1cae9SAlexei Starovoitov 	if (!repeat)
361cf1cae9SAlexei Starovoitov 		repeat = 1;
37df1a2cb7SStanislav Fomichev 
38df1a2cb7SStanislav Fomichev 	rcu_read_lock();
39df1a2cb7SStanislav Fomichev 	preempt_disable();
401cf1cae9SAlexei Starovoitov 	time_start = ktime_get_ns();
411cf1cae9SAlexei Starovoitov 	for (i = 0; i < repeat; i++) {
		/* Storage pointers must be (re)published before every run;
		 * only the final iteration's return value is reported.
		 */
42df1a2cb7SStanislav Fomichev 		bpf_cgroup_storage_set(storage);
43df1a2cb7SStanislav Fomichev 		*retval = BPF_PROG_RUN(prog, ctx);
44df1a2cb7SStanislav Fomichev 
		/* Let a pending signal abort a long repeat loop. */
45df1a2cb7SStanislav Fomichev 		if (signal_pending(current)) {
46df1a2cb7SStanislav Fomichev 			ret = -EINTR;
471cf1cae9SAlexei Starovoitov 			break;
48df1a2cb7SStanislav Fomichev 		}
49df1a2cb7SStanislav Fomichev 
		/* Rescheduling is illegal under preempt_disable()/RCU, so
		 * bank the elapsed time, drop both, yield, then reacquire
		 * and restart the clock - the time spent sleeping is
		 * excluded from the measurement.
		 */
50df1a2cb7SStanislav Fomichev 		if (need_resched()) {
511cf1cae9SAlexei Starovoitov 			time_spent += ktime_get_ns() - time_start;
52df1a2cb7SStanislav Fomichev 			preempt_enable();
53df1a2cb7SStanislav Fomichev 			rcu_read_unlock();
54df1a2cb7SStanislav Fomichev 
551cf1cae9SAlexei Starovoitov 			cond_resched();
56df1a2cb7SStanislav Fomichev 
57df1a2cb7SStanislav Fomichev 			rcu_read_lock();
58df1a2cb7SStanislav Fomichev 			preempt_disable();
591cf1cae9SAlexei Starovoitov 			time_start = ktime_get_ns();
601cf1cae9SAlexei Starovoitov 		}
611cf1cae9SAlexei Starovoitov 	}
621cf1cae9SAlexei Starovoitov 	time_spent += ktime_get_ns() - time_start;
63df1a2cb7SStanislav Fomichev 	preempt_enable();
64df1a2cb7SStanislav Fomichev 	rcu_read_unlock();
65df1a2cb7SStanislav Fomichev 
	/* Report the mean per-iteration runtime, saturating at U32_MAX. */
661cf1cae9SAlexei Starovoitov 	do_div(time_spent, repeat);
671cf1cae9SAlexei Starovoitov 	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
681cf1cae9SAlexei Starovoitov 
698bad74f9SRoman Gushchin 	for_each_cgroup_storage_type(stype)
708bad74f9SRoman Gushchin 		bpf_cgroup_storage_free(storage[stype]);
71f42ee093SRoman Gushchin 
72df1a2cb7SStanislav Fomichev 	return ret;
731cf1cae9SAlexei Starovoitov }
741cf1cae9SAlexei Starovoitov 
/*
 * bpf_test_finish - copy the test result back to userspace.
 *
 * @kattr:    kernel copy of the attr (supplies data_out pointer and the
 *            optional data_size_out buffer-size hint)
 * @uattr:    user attr that receives data_size_out, retval and duration
 * @data:     output buffer produced by the test run
 * @size:     full output size; always reported to the user, even when the
 *            actual copy was clamped
 * @retval:   program return value
 * @duration: mean per-run time in ns
 *
 * Returns 0 on success, -ENOSPC when the user's buffer hint was smaller
 * than the output (data is truncated but all metadata fields are still
 * written), or -EFAULT if any copy_to_user() fails.
 */
7578e52272SDavid Miller static int bpf_test_finish(const union bpf_attr *kattr,
7678e52272SDavid Miller 			   union bpf_attr __user *uattr, const void *data,
771cf1cae9SAlexei Starovoitov 			   u32 size, u32 retval, u32 duration)
781cf1cae9SAlexei Starovoitov {
7978e52272SDavid Miller 	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
801cf1cae9SAlexei Starovoitov 	int err = -EFAULT;
81b5a36b1eSLorenz Bauer 	u32 copy_size = size;
821cf1cae9SAlexei Starovoitov 
83b5a36b1eSLorenz Bauer 	/* Clamp copy if the user has provided a size hint, but copy the full
84b5a36b1eSLorenz Bauer 	 * buffer if not to retain old behaviour.
85b5a36b1eSLorenz Bauer 	 */
86b5a36b1eSLorenz Bauer 	if (kattr->test.data_size_out &&
87b5a36b1eSLorenz Bauer 	    copy_size > kattr->test.data_size_out) {
88b5a36b1eSLorenz Bauer 		copy_size = kattr->test.data_size_out;
		/* Remember the truncation; only reported if every
		 * copy_to_user() below succeeds (a fault wins otherwise,
		 * since err stays -EFAULT on the goto paths).
		 */
89b5a36b1eSLorenz Bauer 		err = -ENOSPC;
90b5a36b1eSLorenz Bauer 	}
91b5a36b1eSLorenz Bauer 
92b5a36b1eSLorenz Bauer 	if (data_out && copy_to_user(data_out, data, copy_size))
931cf1cae9SAlexei Starovoitov 		goto out;
	/* Report the untruncated size so userspace can resize and retry. */
941cf1cae9SAlexei Starovoitov 	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
951cf1cae9SAlexei Starovoitov 		goto out;
961cf1cae9SAlexei Starovoitov 	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
971cf1cae9SAlexei Starovoitov 		goto out;
981cf1cae9SAlexei Starovoitov 	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
991cf1cae9SAlexei Starovoitov 		goto out;
100b5a36b1eSLorenz Bauer 	if (err != -ENOSPC)
1011cf1cae9SAlexei Starovoitov 		err = 0;
1021cf1cae9SAlexei Starovoitov out:
1031cf1cae9SAlexei Starovoitov 	return err;
1041cf1cae9SAlexei Starovoitov }
1051cf1cae9SAlexei Starovoitov 
/*
 * bpf_test_init - allocate a kernel buffer and fill it with the user's
 * test input.
 *
 * Buffer layout: [headroom][size bytes from kattr->test.data_in][tailroom].
 * The whole allocation must fit in one page, and the input must be at
 * least an Ethernet header long.
 *
 * Returns the zero-initialized buffer or ERR_PTR(-EINVAL/-ENOMEM/-EFAULT).
 * The caller owns the buffer and must kfree() it (or transfer ownership,
 * e.g. via build_skb()).
 */
1061cf1cae9SAlexei Starovoitov static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
1071cf1cae9SAlexei Starovoitov 			   u32 headroom, u32 tailroom)
1081cf1cae9SAlexei Starovoitov {
1091cf1cae9SAlexei Starovoitov 	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
1101cf1cae9SAlexei Starovoitov 	void *data;
1111cf1cae9SAlexei Starovoitov 
1121cf1cae9SAlexei Starovoitov 	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
1131cf1cae9SAlexei Starovoitov 		return ERR_PTR(-EINVAL);
1141cf1cae9SAlexei Starovoitov 
1151cf1cae9SAlexei Starovoitov 	data = kzalloc(size + headroom + tailroom, GFP_USER);
1161cf1cae9SAlexei Starovoitov 	if (!data)
1171cf1cae9SAlexei Starovoitov 		return ERR_PTR(-ENOMEM);
1181cf1cae9SAlexei Starovoitov 
	/* Payload lands after the headroom; head/tail stay zeroed. */
1191cf1cae9SAlexei Starovoitov 	if (copy_from_user(data + headroom, data_in, size)) {
1201cf1cae9SAlexei Starovoitov 		kfree(data);
1211cf1cae9SAlexei Starovoitov 		return ERR_PTR(-EFAULT);
1221cf1cae9SAlexei Starovoitov 	}
1231cf1cae9SAlexei Starovoitov 	return data;
1241cf1cae9SAlexei Starovoitov }
1251cf1cae9SAlexei Starovoitov 
/*
 * bpf_prog_test_run_skb - BPF_PROG_TEST_RUN handler for skb-based program
 * types.
 *
 * Builds a fake sk_buff around the user-supplied packet data, runs the
 * program via bpf_test_run(), then copies the (possibly modified) packet
 * and stats back with bpf_test_finish().
 *
 * Returns 0 on success or a negative errno (-ENOMEM, -EINTR, -EFAULT,
 * -ENOSPC from the copy-out, or whatever bpf_test_init() reports).
 */
1261cf1cae9SAlexei Starovoitov int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
1271cf1cae9SAlexei Starovoitov 			  union bpf_attr __user *uattr)
1281cf1cae9SAlexei Starovoitov {
1291cf1cae9SAlexei Starovoitov 	bool is_l2 = false, is_direct_pkt_access = false;
1301cf1cae9SAlexei Starovoitov 	u32 size = kattr->test.data_size_in;
1311cf1cae9SAlexei Starovoitov 	u32 repeat = kattr->test.repeat;
1321cf1cae9SAlexei Starovoitov 	u32 retval, duration;
1336e6fddc7SDaniel Borkmann 	int hh_len = ETH_HLEN;
1341cf1cae9SAlexei Starovoitov 	struct sk_buff *skb;
1352cb494a3SSong Liu 	struct sock *sk;
1361cf1cae9SAlexei Starovoitov 	void *data;
1371cf1cae9SAlexei Starovoitov 	int ret;
1381cf1cae9SAlexei Starovoitov 
1391cf1cae9SAlexei Starovoitov 	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
1401cf1cae9SAlexei Starovoitov 			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1411cf1cae9SAlexei Starovoitov 	if (IS_ERR(data))
1421cf1cae9SAlexei Starovoitov 		return PTR_ERR(data);
1431cf1cae9SAlexei Starovoitov 
	/* SCHED_CLS/SCHED_ACT programs see an L2 (MAC) header; the LWT
	 * types do not.  All of these allow direct packet access.
	 */
1441cf1cae9SAlexei Starovoitov 	switch (prog->type) {
1451cf1cae9SAlexei Starovoitov 	case BPF_PROG_TYPE_SCHED_CLS:
1461cf1cae9SAlexei Starovoitov 	case BPF_PROG_TYPE_SCHED_ACT:
1471cf1cae9SAlexei Starovoitov 		is_l2 = true;
1481cf1cae9SAlexei Starovoitov 		/* fall through */
1491cf1cae9SAlexei Starovoitov 	case BPF_PROG_TYPE_LWT_IN:
1501cf1cae9SAlexei Starovoitov 	case BPF_PROG_TYPE_LWT_OUT:
1511cf1cae9SAlexei Starovoitov 	case BPF_PROG_TYPE_LWT_XMIT:
1521cf1cae9SAlexei Starovoitov 		is_direct_pkt_access = true;
1531cf1cae9SAlexei Starovoitov 		break;
1541cf1cae9SAlexei Starovoitov 	default:
1551cf1cae9SAlexei Starovoitov 		break;
1561cf1cae9SAlexei Starovoitov 	}
1571cf1cae9SAlexei Starovoitov 
	/* Attach a minimal dummy socket (current netns, sock_init_data'd)
	 * so programs and helpers that dereference skb->sk have something
	 * valid to look at.  Freed with plain kfree(), not sock_put().
	 */
1582cb494a3SSong Liu 	sk = kzalloc(sizeof(struct sock), GFP_USER);
1592cb494a3SSong Liu 	if (!sk) {
1601cf1cae9SAlexei Starovoitov 		kfree(data);
1611cf1cae9SAlexei Starovoitov 		return -ENOMEM;
1621cf1cae9SAlexei Starovoitov 	}
1632cb494a3SSong Liu 	sock_net_set(sk, current->nsproxy->net_ns);
1642cb494a3SSong Liu 	sock_init_data(NULL, sk);
1652cb494a3SSong Liu 
	/* build_skb() takes ownership of @data on success; on failure we
	 * must still free it ourselves.
	 */
1662cb494a3SSong Liu 	skb = build_skb(data, 0);
1672cb494a3SSong Liu 	if (!skb) {
1682cb494a3SSong Liu 		kfree(data);
1692cb494a3SSong Liu 		kfree(sk);
1702cb494a3SSong Liu 		return -ENOMEM;
1712cb494a3SSong Liu 	}
1722cb494a3SSong Liu 	skb->sk = sk;
1731cf1cae9SAlexei Starovoitov 
1741cf1cae9SAlexei Starovoitov 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1751cf1cae9SAlexei Starovoitov 	__skb_put(skb, size);
	/* eth_type_trans() pulls the Ethernet header and sets protocol,
	 * as if the packet arrived on the loopback device.
	 */
1761cf1cae9SAlexei Starovoitov 	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
1771cf1cae9SAlexei Starovoitov 	skb_reset_network_header(skb);
1781cf1cae9SAlexei Starovoitov 
	/* L2 programs expect skb->data to point at the MAC header, so push
	 * back what eth_type_trans() pulled.
	 */
1791cf1cae9SAlexei Starovoitov 	if (is_l2)
1806e6fddc7SDaniel Borkmann 		__skb_push(skb, hh_len);
1811cf1cae9SAlexei Starovoitov 	if (is_direct_pkt_access)
1826aaae2b6SDaniel Borkmann 		bpf_compute_data_pointers(skb);
1831cf1cae9SAlexei Starovoitov 	ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
184dcb40590SRoman Gushchin 	if (ret) {
185dcb40590SRoman Gushchin 		kfree_skb(skb);
186dcb40590SRoman Gushchin 		kfree(sk);
187dcb40590SRoman Gushchin 		return ret;
188dcb40590SRoman Gushchin 	}
	/* For non-L2 types, prepend a zeroed MAC header before copy-out so
	 * the output format matches the L2 case.  The program may have
	 * consumed headroom (e.g. via head adjustment), so expand the head
	 * first if needed.
	 */
1896e6fddc7SDaniel Borkmann 	if (!is_l2) {
1906e6fddc7SDaniel Borkmann 		if (skb_headroom(skb) < hh_len) {
1916e6fddc7SDaniel Borkmann 			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
1926e6fddc7SDaniel Borkmann 
1936e6fddc7SDaniel Borkmann 			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
1946e6fddc7SDaniel Borkmann 				kfree_skb(skb);
1952cb494a3SSong Liu 				kfree(sk);
1966e6fddc7SDaniel Borkmann 				return -ENOMEM;
1976e6fddc7SDaniel Borkmann 			}
1986e6fddc7SDaniel Borkmann 		}
1996e6fddc7SDaniel Borkmann 		memset(__skb_push(skb, hh_len), 0, hh_len);
2006e6fddc7SDaniel Borkmann 	}
2016e6fddc7SDaniel Borkmann 
2021cf1cae9SAlexei Starovoitov 	size = skb->len;
2031cf1cae9SAlexei Starovoitov 	/* bpf program can never convert linear skb to non-linear */
2041cf1cae9SAlexei Starovoitov 	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
2051cf1cae9SAlexei Starovoitov 		size = skb_headlen(skb);
20678e52272SDavid Miller 	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
2071cf1cae9SAlexei Starovoitov 	kfree_skb(skb);
2082cb494a3SSong Liu 	kfree(sk);
2091cf1cae9SAlexei Starovoitov 	return ret;
2101cf1cae9SAlexei Starovoitov }
2111cf1cae9SAlexei Starovoitov 
/*
 * bpf_prog_test_run_xdp - BPF_PROG_TEST_RUN handler for XDP programs.
 *
 * Sets up an on-stack xdp_buff over the user-supplied packet, runs the
 * program via bpf_test_run(), and copies the (possibly resized) packet
 * plus stats back to userspace.
 *
 * Returns 0 on success or a negative errno from the init/run/finish steps.
 */
2121cf1cae9SAlexei Starovoitov int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
2131cf1cae9SAlexei Starovoitov 			  union bpf_attr __user *uattr)
2141cf1cae9SAlexei Starovoitov {
2151cf1cae9SAlexei Starovoitov 	u32 size = kattr->test.data_size_in;
2161cf1cae9SAlexei Starovoitov 	u32 repeat = kattr->test.repeat;
21765073a67SDaniel Borkmann 	struct netdev_rx_queue *rxqueue;
2181cf1cae9SAlexei Starovoitov 	struct xdp_buff xdp = {};
2191cf1cae9SAlexei Starovoitov 	u32 retval, duration;
2201cf1cae9SAlexei Starovoitov 	void *data;
2211cf1cae9SAlexei Starovoitov 	int ret;
2221cf1cae9SAlexei Starovoitov 
	/* XDP_PACKET_HEADROOM of headroom allows bpf_xdp_adjust_head();
	 * no tailroom is reserved.
	 */
2231cf1cae9SAlexei Starovoitov 	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
2241cf1cae9SAlexei Starovoitov 	if (IS_ERR(data))
2251cf1cae9SAlexei Starovoitov 		return PTR_ERR(data);
2261cf1cae9SAlexei Starovoitov 
2271cf1cae9SAlexei Starovoitov 	xdp.data_hard_start = data;
2281cf1cae9SAlexei Starovoitov 	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
229de8f3a83SDaniel Borkmann 	xdp.data_meta = xdp.data;
2301cf1cae9SAlexei Starovoitov 	xdp.data_end = xdp.data + size;
2311cf1cae9SAlexei Starovoitov 
	/* Borrow rx queue 0 of the loopback device in the current netns so
	 * xdp.rxq is valid for programs that read it.
	 */
23265073a67SDaniel Borkmann 	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
23365073a67SDaniel Borkmann 	xdp.rxq = &rxqueue->xdp_rxq;
23465073a67SDaniel Borkmann 
2351cf1cae9SAlexei Starovoitov 	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
236dcb40590SRoman Gushchin 	if (ret)
237dcb40590SRoman Gushchin 		goto out;
	/* Recompute the output size only if the program moved data or
	 * data_end (bpf_xdp_adjust_head/tail).
	 */
238587b80ccSNikita V. Shirokov 	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
239587b80ccSNikita V. Shirokov 	    xdp.data_end != xdp.data + size)
2401cf1cae9SAlexei Starovoitov 		size = xdp.data_end - xdp.data;
24178e52272SDavid Miller 	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
242dcb40590SRoman Gushchin out:
2431cf1cae9SAlexei Starovoitov 	kfree(data);
2441cf1cae9SAlexei Starovoitov 	return ret;
2451cf1cae9SAlexei Starovoitov }
246b7a1848eSStanislav Fomichev 
/*
 * bpf_prog_test_run_flow_dissector - BPF_PROG_TEST_RUN handler for flow
 * dissector programs.
 *
 * Builds a fake skb like the skb test-run path, but invokes the program
 * through __skb_flow_bpf_dissect() rather than BPF_PROG_RUN(), so the
 * timed repeat loop (RCU + preempt-disable, signal and resched handling)
 * is open-coded here instead of reusing bpf_test_run().  The output copied
 * back to userspace is the dissected struct bpf_flow_keys, not packet data.
 *
 * Returns 0 on success or a negative errno (-EINVAL for a wrong program
 * type, -ENOMEM, -EINTR, or a copy-out error from bpf_test_finish()).
 */
246b7a1848eSStanislav Fomichev int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
247b7a1848eSStanislav Fomichev 				     const union bpf_attr *kattr,
248b7a1848eSStanislav Fomichev 				     union bpf_attr __user *uattr)
249b7a1848eSStanislav Fomichev {
250b7a1848eSStanislav Fomichev 	u32 size = kattr->test.data_size_in;
251b7a1848eSStanislav Fomichev 	u32 repeat = kattr->test.repeat;
252b7a1848eSStanislav Fomichev 	struct bpf_flow_keys flow_keys;
253b7a1848eSStanislav Fomichev 	u64 time_start, time_spent = 0;
254b7a1848eSStanislav Fomichev 	struct bpf_skb_data_end *cb;
255b7a1848eSStanislav Fomichev 	u32 retval, duration;
256b7a1848eSStanislav Fomichev 	struct sk_buff *skb;
257b7a1848eSStanislav Fomichev 	struct sock *sk;
258b7a1848eSStanislav Fomichev 	void *data;
259b7a1848eSStanislav Fomichev 	int ret;
260b7a1848eSStanislav Fomichev 	u32 i;
261b7a1848eSStanislav Fomichev 
262b7a1848eSStanislav Fomichev 	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
263b7a1848eSStanislav Fomichev 		return -EINVAL;
264b7a1848eSStanislav Fomichev 
265b7a1848eSStanislav Fomichev 	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
266b7a1848eSStanislav Fomichev 			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
267b7a1848eSStanislav Fomichev 	if (IS_ERR(data))
268b7a1848eSStanislav Fomichev 		return PTR_ERR(data);
269b7a1848eSStanislav Fomichev 
	/* Minimal dummy socket so skb->sk is valid; freed with kfree(). */
270b7a1848eSStanislav Fomichev 	sk = kzalloc(sizeof(*sk), GFP_USER);
271b7a1848eSStanislav Fomichev 	if (!sk) {
272b7a1848eSStanislav Fomichev 		kfree(data);
273b7a1848eSStanislav Fomichev 		return -ENOMEM;
274b7a1848eSStanislav Fomichev 	}
275b7a1848eSStanislav Fomichev 	sock_net_set(sk, current->nsproxy->net_ns);
276b7a1848eSStanislav Fomichev 	sock_init_data(NULL, sk);
277b7a1848eSStanislav Fomichev 
	/* build_skb() owns @data on success; free it ourselves otherwise. */
278b7a1848eSStanislav Fomichev 	skb = build_skb(data, 0);
279b7a1848eSStanislav Fomichev 	if (!skb) {
280b7a1848eSStanislav Fomichev 		kfree(data);
281b7a1848eSStanislav Fomichev 		kfree(sk);
282b7a1848eSStanislav Fomichev 		return -ENOMEM;
283b7a1848eSStanislav Fomichev 	}
284b7a1848eSStanislav Fomichev 	skb->sk = sk;
285b7a1848eSStanislav Fomichev 
286b7a1848eSStanislav Fomichev 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
287b7a1848eSStanislav Fomichev 	__skb_put(skb, size);
288b7a1848eSStanislav Fomichev 	skb->protocol = eth_type_trans(skb,
289b7a1848eSStanislav Fomichev 				       current->nsproxy->net_ns->loopback_dev);
290b7a1848eSStanislav Fomichev 	skb_reset_network_header(skb);
291b7a1848eSStanislav Fomichev 
	/* The dissector program writes its result through the flow_keys
	 * pointer stashed in the skb control block; flow_keys lives on this
	 * stack frame for the whole run.
	 */
292b7a1848eSStanislav Fomichev 	cb = (struct bpf_skb_data_end *)skb->cb;
293b7a1848eSStanislav Fomichev 	cb->qdisc_cb.flow_keys = &flow_keys;
294b7a1848eSStanislav Fomichev 
295b7a1848eSStanislav Fomichev 	if (!repeat)
296b7a1848eSStanislav Fomichev 		repeat = 1;
297b7a1848eSStanislav Fomichev 
	/* Timed loop mirroring bpf_test_run(): run under RCU with
	 * preemption off, abort on signals, and drop/reacquire around
	 * cond_resched() so sleep time is excluded from the average.
	 */
298a439184dSStanislav Fomichev 	rcu_read_lock();
299a439184dSStanislav Fomichev 	preempt_disable();
300b7a1848eSStanislav Fomichev 	time_start = ktime_get_ns();
301b7a1848eSStanislav Fomichev 	for (i = 0; i < repeat; i++) {
302b7a1848eSStanislav Fomichev 		retval = __skb_flow_bpf_dissect(prog, skb,
303b7a1848eSStanislav Fomichev 						&flow_keys_dissector,
304b7a1848eSStanislav Fomichev 						&flow_keys);
305a439184dSStanislav Fomichev 
306a439184dSStanislav Fomichev 		if (signal_pending(current)) {
307b7a1848eSStanislav Fomichev 			preempt_enable();
308a439184dSStanislav Fomichev 			rcu_read_unlock();
309a439184dSStanislav Fomichev 
310a439184dSStanislav Fomichev 			ret = -EINTR;
311a439184dSStanislav Fomichev 			goto out;
312a439184dSStanislav Fomichev 		}
313b7a1848eSStanislav Fomichev 
314b7a1848eSStanislav Fomichev 		if (need_resched()) {
315b7a1848eSStanislav Fomichev 			time_spent += ktime_get_ns() - time_start;
316a439184dSStanislav Fomichev 			preempt_enable();
317a439184dSStanislav Fomichev 			rcu_read_unlock();
318a439184dSStanislav Fomichev 
319b7a1848eSStanislav Fomichev 			cond_resched();
320a439184dSStanislav Fomichev 
321a439184dSStanislav Fomichev 			rcu_read_lock();
322a439184dSStanislav Fomichev 			preempt_disable();
323b7a1848eSStanislav Fomichev 			time_start = ktime_get_ns();
324b7a1848eSStanislav Fomichev 		}
325b7a1848eSStanislav Fomichev 	}
326b7a1848eSStanislav Fomichev 	time_spent += ktime_get_ns() - time_start;
327a439184dSStanislav Fomichev 	preempt_enable();
328a439184dSStanislav Fomichev 	rcu_read_unlock();
329a439184dSStanislav Fomichev 
	/* Mean per-run duration, saturated to fit the u32 attr field. */
330b7a1848eSStanislav Fomichev 	do_div(time_spent, repeat);
331b7a1848eSStanislav Fomichev 	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
332b7a1848eSStanislav Fomichev 
	/* Output is the flow_keys struct, not packet bytes. */
333b7a1848eSStanislav Fomichev 	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
334b7a1848eSStanislav Fomichev 			      retval, duration);
335b7a1848eSStanislav Fomichev 
336a439184dSStanislav Fomichev out:
337b7a1848eSStanislav Fomichev 	kfree_skb(skb);
338b7a1848eSStanislav Fomichev 	kfree(sk);
339b7a1848eSStanislav Fomichev 	return ret;
340b7a1848eSStanislav Fomichev }
342