// SPDX-License-Identifier: GPL-2.0
/* XDP sockets monitoring support
 *
 * Copyright(c) 2019 Intel Corporation.
 *
 * Author: Björn Töpel <bjorn.topel@intel.com>
 */
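
/*
 * Usage sketch (illustrative only, not compiled into this module): userspace
 * queries this handler over a NETLINK_SOCK_DIAG socket by sending a
 * SOCK_DIAG_BY_FAMILY dump request carrying a struct xdp_diag_req with
 * sdiag_family = AF_XDP. Error handling and reply parsing are omitted; the
 * reply layout is built by xsk_diag_fill() below.
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct xdp_diag_req req;
 *	} msg = {
 *		.nlh = {
 *			.nlmsg_len = sizeof(msg),
 *			.nlmsg_type = SOCK_DIAG_BY_FAMILY,
 *			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *		},
 *		.req = {
 *			.sdiag_family = AF_XDP,
 *			.xdiag_show = XDP_SHOW_INFO | XDP_SHOW_RING_CFG |
 *				      XDP_SHOW_UMEM | XDP_SHOW_STATS,
 *		},
 *	};
 *	send(fd, &msg, sizeof(msg), 0);
 *
 * Each socket in the reply is a struct xdp_diag_msg followed by XDP_DIAG_*
 * netlink attributes.
 */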

#include <linux/module.h>
#include <net/xdp_sock.h>
#include <linux/xdp_diag.h>
#include <linux/sock_diag.h>

#include "xsk_queue.h"
#include "xsk.h"

static int xsk_diag_put_info(const struct xdp_sock *xs, struct sk_buff *nlskb)
{
	struct xdp_diag_info di = {};

	di.ifindex = xs->dev ? xs->dev->ifindex : 0;
	di.queue_id = xs->queue_id;
	return nla_put(nlskb, XDP_DIAG_INFO, sizeof(di), &di);
}

static int xsk_diag_put_ring(const struct xsk_queue *queue, int nl_type,
			     struct sk_buff *nlskb)
{
	struct xdp_diag_ring dr = {};

	dr.entries = queue->nentries;
	return nla_put(nlskb, nl_type, sizeof(dr), &dr);
}

static int xsk_diag_put_rings_cfg(const struct xdp_sock *xs,
				  struct sk_buff *nlskb)
{
	int err = 0;

	if (xs->rx)
		err = xsk_diag_put_ring(xs->rx, XDP_DIAG_RX_RING, nlskb);
	if (!err && xs->tx)
		err = xsk_diag_put_ring(xs->tx, XDP_DIAG_TX_RING, nlskb);
	return err;
}

/* Describe the umem bound to the socket, including its fill and
 * completion rings. Reports nothing if no umem is attached.
 */
static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
{
	struct xsk_buff_pool *pool = xs->pool;
	struct xdp_umem *umem = xs->umem;
	struct xdp_diag_umem du = {};
	int err;

	if (!umem)
		return 0;

	du.id = umem->id;
	du.size = umem->size;
	du.num_pages = umem->npgs;
	du.chunk_size = umem->chunk_size;
	du.headroom = umem->headroom;
	du.ifindex = pool->netdev ? pool->netdev->ifindex : 0;
	du.queue_id = pool->queue_id;
	du.flags = 0;
	if (umem->zc)
		du.flags |= XDP_DU_F_ZEROCOPY;
	du.refs = refcount_read(&umem->users);

	err = nla_put(nlskb, XDP_DIAG_UMEM, sizeof(du), &du);

	if (!err && pool->fq)
		err = xsk_diag_put_ring(pool->fq,
					XDP_DIAG_UMEM_FILL_RING, nlskb);
	if (!err && pool->cq)
		err = xsk_diag_put_ring(pool->cq,
					XDP_DIAG_UMEM_COMPLETION_RING, nlskb);
	return err;
}

/* Report per-socket drop and error counters. */
static int xsk_diag_put_stats(const struct xdp_sock *xs, struct sk_buff *nlskb)
{
	struct xdp_diag_stats ds = {};

	ds.n_rx_dropped = xs->rx_dropped;
	ds.n_rx_invalid = xskq_nb_invalid_descs(xs->rx);
	ds.n_rx_full = xs->rx_queue_full;
	ds.n_fill_ring_empty = xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
	ds.n_tx_invalid = xskq_nb_invalid_descs(xs->tx);
	ds.n_tx_ring_empty = xskq_nb_queue_empty_descs(xs->tx);
	return nla_put(nlskb, XDP_DIAG_STATS, sizeof(ds), &ds);
}

/* Fill one reply message for @sk: a struct xdp_diag_msg header followed by
 * the netlink attributes requested in req->xdiag_show. On overflow, the
 * partially built message is trimmed and -EMSGSIZE returned so the dump can
 * resume in a fresh skb.
 */
static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
			 struct xdp_diag_req *req,
			 struct user_namespace *user_ns,
			 u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct xdp_sock *xs = xdp_sk(sk);
	struct xdp_diag_msg *msg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(nlskb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*msg),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	msg = nlmsg_data(nlh);
	memset(msg, 0, sizeof(*msg));
	msg->xdiag_family = AF_XDP;
	msg->xdiag_type = sk->sk_type;
	msg->xdiag_ino = sk_ino;
	sock_diag_save_cookie(sk, msg->xdiag_cookie);

	mutex_lock(&xs->mutex);
	if ((req->xdiag_show & XDP_SHOW_INFO) && xsk_diag_put_info(xs, nlskb))
		goto out_nlmsg_trim;

	/* The owning UID is reported as part of XDP_SHOW_INFO. */
	if ((req->xdiag_show & XDP_SHOW_INFO) &&
	    nla_put_u32(nlskb, XDP_DIAG_UID,
			from_kuid_munged(user_ns, sock_i_uid(sk))))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_RING_CFG) &&
	    xsk_diag_put_rings_cfg(xs, nlskb))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_UMEM) &&
	    xsk_diag_put_umem(xs, nlskb))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_STATS) &&
	    xsk_diag_put_stats(xs, nlskb))
		goto out_nlmsg_trim;

	mutex_unlock(&xs->mutex);
	nlmsg_end(nlskb, nlh);
	return 0;

out_nlmsg_trim:
	mutex_unlock(&xs->mutex);
	nlmsg_cancel(nlskb, nlh);
	return -EMSGSIZE;
}

/* Walk every XDP socket in the namespace. cb->args[0] tracks how many
 * sockets have been dumped so far, so a multi-part dump resumes where the
 * previous skb filled up.
 */
static int xsk_diag_dump(struct sk_buff *nlskb, struct netlink_callback *cb)
{
	struct xdp_diag_req *req = nlmsg_data(cb->nlh);
	struct net *net = sock_net(nlskb->sk);
	int num = 0, s_num = cb->args[0];
	struct sock *sk;

	mutex_lock(&net->xdp.lock);

	sk_for_each(sk, &net->xdp.list) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (num++ < s_num)
			continue;

		if (xsk_diag_fill(sk, nlskb, req,
				  sk_user_ns(NETLINK_CB(cb->skb).sk),
				  NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  sock_i_ino(sk)) < 0) {
			num--;
			break;
		}
	}

	mutex_unlock(&net->xdp.lock);
	cb->args[0] = num;
	return nlskb->len;
}

static int xsk_diag_handler_dump(struct sk_buff *nlskb, struct nlmsghdr *hdr)
{
	struct netlink_dump_control c = { .dump = xsk_diag_dump };
	int hdrlen = sizeof(struct xdp_diag_req);
	struct net *net = sock_net(nlskb->sk);

	if (nlmsg_len(hdr) < hdrlen)
		return -EINVAL;

	/* Only dump requests are supported. */
	if (!(hdr->nlmsg_flags & NLM_F_DUMP))
		return -EOPNOTSUPP;

	return netlink_dump_start(net->diag_nlsk, nlskb, hdr, &c);
}

static const struct sock_diag_handler xsk_diag_handler = {
	.family = AF_XDP,
	.dump = xsk_diag_handler_dump,
};

static int __init xsk_diag_init(void)
{
	return sock_diag_register(&xsk_diag_handler);
}

static void __exit xsk_diag_exit(void)
{
	sock_diag_unregister(&xsk_diag_handler);
}

module_init(xsk_diag_init);
module_exit(xsk_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, AF_XDP);