// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/mm.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>

#include "mana.h"

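/* Transmit an skb generated from an XDP_TX verdict through the regular
 * MANA transmit path, taking the netdev TX queue lock for the queue the
 * skb is mapped to.
 */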
void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev)
{
	u16 txq_idx = skb_get_queue_mapping(skb);
	struct netdev_queue *ndevtxq;
	int rc;

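	/* Put the Ethernet header back in front of the data before handing
	 * the skb to the transmit path.
	 */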
	__skb_push(skb, ETH_HLEN);

	ndevtxq = netdev_get_tx_queue(ndev, txq_idx);
	__netif_tx_lock(ndevtxq, smp_processor_id());

	rc = mana_start_xmit(skb, ndev);

	__netif_tx_unlock(ndevtxq);

	if (dev_xmit_complete(rc))
		return;

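	/* The frame was not accepted for transmission: free it and count
	 * the drop.
	 */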
	dev_kfree_skb_any(skb);
	ndev->stats.tx_dropped++;
}

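/* Run the XDP program attached to the RX queue, if any, on a received
 * buffer and return the program's verdict. XDP_PASS is returned when no
 * program is attached.
 */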
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len)
{
	struct bpf_prog *prog;
	u32 act = XDP_PASS;

	rcu_read_lock();
	prog = rcu_dereference(rxq->bpf_prog);

	if (!prog)
		goto out;

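	/* Describe the receive buffer to XDP: a page-sized frame with
	 * XDP_PACKET_HEADROOM reserved in front of the packet data.
	 */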
	xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq);
	xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, false);

	act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
	case XDP_TX:
	case XDP_DROP:
		break;

	case XDP_ABORTED:
		trace_xdp_exception(ndev, prog, act);
		break;

	default:
		bpf_warn_invalid_xdp_action(ndev, prog, act);
	}

out:
	rcu_read_unlock();

	return act;
}

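/* Total buffer length needed for a frame of the given length, including
 * the aligned skb_shared_info placed after the data.
 */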
static unsigned int mana_xdp_fraglen(unsigned int len)
{
	return SKB_DATA_ALIGN(len) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

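/* Return the XDP program attached to the port; the caller must hold RTNL. */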
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc)
{
	ASSERT_RTNL();

	return apc->bpf_prog;
}

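/* Return the XDP program currently attached to the RX queues. All queues
 * share the same program (see mana_chn_setxdp()), so queue 0 is
 * representative.
 */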
static struct bpf_prog *mana_chn_xdp_get(struct mana_port_context *apc)
{
	return rtnl_dereference(apc->rxqs[0]->bpf_prog);
}

/* Set the XDP program on all channels (RX queues) of the port */
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog)
{
	struct bpf_prog *old_prog = mana_chn_xdp_get(apc);
	unsigned int num_queues = apc->num_queues;
	int i;

	ASSERT_RTNL();

	if (old_prog == prog)
		return;

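	/* Take one reference on the new program for every RX queue that
	 * will hold a pointer to it.
	 */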
	if (prog)
		bpf_prog_add(prog, num_queues);

	for (i = 0; i < num_queues; i++)
		rcu_assign_pointer(apc->rxqs[i]->bpf_prog, prog);

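	/* Drop the per-queue references that were held on the old program. */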
	if (old_prog)
		for (i = 0; i < num_queues; i++)
			bpf_prog_put(old_prog);
}

static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
			struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	struct bpf_prog *old_prog;
	int buf_max;

	old_prog = mana_xdp_get(apc);

	if (!old_prog && !prog)
		return 0;

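	/* The XDP headroom plus an MTU-sized frame and its skb_shared_info
	 * must fit within a single page.
	 */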
	buf_max = XDP_PACKET_HEADROOM + mana_xdp_fraglen(ndev->mtu + ETH_HLEN);
	if (prog && buf_max > PAGE_SIZE) {
		netdev_err(ndev, "XDP: mtu:%u too large, buf_max:%u\n",
			   ndev->mtu, buf_max);
		NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");

		return -EOPNOTSUPP;
	}

	/* One refcnt of the prog is already held by the caller, so
	 * don't increase the refcnt for this one.
	 */
	apc->bpf_prog = prog;

	if (old_prog)
		bpf_prog_put(old_prog);

	if (apc->port_is_up)
		mana_chn_setxdp(apc, prog);

	return 0;
}

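/* Handler for netdev BPF commands: only XDP_SETUP_PROG is supported; any
 * other command is rejected with -EOPNOTSUPP.
 */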
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
{
	struct netlink_ext_ack *extack = bpf->extack;

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return mana_xdp_set(ndev, bpf->prog, extack);

	default:
		return -EOPNOTSUPP;
	}
}