/* xref: /openbmc/linux/net/packet/diag.c (revision 160b8e75) */
#include <linux/module.h>
#include <linux/sock_diag.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/packet_diag.h>
#include <linux/percpu.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "internal.h"

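/* Report basic packet socket state (bound ifindex, ring version,
 * tp_reserve, copy threshold, timestamping mode and the PDI_* status
 * bits) as a single PACKET_DIAG_INFO attribute.
 */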
static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
{
	struct packet_diag_info pinfo;

	pinfo.pdi_index = po->ifindex;
	pinfo.pdi_version = po->tp_version;
	pinfo.pdi_reserve = po->tp_reserve;
	pinfo.pdi_copy_thresh = po->copy_thresh;
	pinfo.pdi_tstamp = po->tp_tstamp;

	pinfo.pdi_flags = 0;
	if (po->running)
		pinfo.pdi_flags |= PDI_RUNNING;
	if (po->auxdata)
		pinfo.pdi_flags |= PDI_AUXDATA;
	if (po->origdev)
		pinfo.pdi_flags |= PDI_ORIGDEV;
	if (po->has_vnet_hdr)
		pinfo.pdi_flags |= PDI_VNETHDR;
	if (po->tp_loss)
		pinfo.pdi_flags |= PDI_LOSS;

	return nla_put(nlskb, PACKET_DIAG_INFO, sizeof(pinfo), &pinfo);
}

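/* Dump the socket's multicast memberships as a nested
 * PACKET_DIAG_MCLIST attribute, one struct packet_diag_mclist per
 * entry. The membership list is walked under the RTNL lock.
 */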
static int pdiag_put_mclist(const struct packet_sock *po, struct sk_buff *nlskb)
{
	struct nlattr *mca;
	struct packet_mclist *ml;

	mca = nla_nest_start(nlskb, PACKET_DIAG_MCLIST);
	if (!mca)
		return -EMSGSIZE;

	rtnl_lock();
	for (ml = po->mclist; ml; ml = ml->next) {
		struct packet_diag_mclist *dml;

		dml = nla_reserve_nohdr(nlskb, sizeof(*dml));
		if (!dml) {
			rtnl_unlock();
			nla_nest_cancel(nlskb, mca);
			return -EMSGSIZE;
		}

		dml->pdmc_index = ml->ifindex;
		dml->pdmc_type = ml->type;
		dml->pdmc_alen = ml->alen;
		dml->pdmc_count = ml->count;
		BUILD_BUG_ON(sizeof(dml->pdmc_addr) != sizeof(ml->addr));
		memcpy(dml->pdmc_addr, ml->addr, sizeof(ml->addr));
	}

	rtnl_unlock();
	nla_nest_end(nlskb, mca);

	return 0;
}

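/* Describe one memory-mapped ring as a packet_diag_ring attribute of
 * the given netlink type. The block-retire fields only exist for
 * TPACKET_V3 rings and are reported as zero for older versions.
 */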
static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type,
		struct sk_buff *nlskb)
{
	struct packet_diag_ring pdr;

	if (!ring->pg_vec)
		return 0;

	pdr.pdr_block_size = ring->pg_vec_pages << PAGE_SHIFT;
	pdr.pdr_block_nr = ring->pg_vec_len;
	pdr.pdr_frame_size = ring->frame_size;
	pdr.pdr_frame_nr = ring->frame_max + 1;

	if (ver > TPACKET_V2) {
		pdr.pdr_retire_tmo = ring->prb_bdqc.retire_blk_tov;
		pdr.pdr_sizeof_priv = ring->prb_bdqc.blk_sizeof_priv;
		pdr.pdr_features = ring->prb_bdqc.feature_req_word;
	} else {
		pdr.pdr_retire_tmo = 0;
		pdr.pdr_sizeof_priv = 0;
		pdr.pdr_features = 0;
	}

	return nla_put(nlskb, nl_type, sizeof(pdr), &pdr);
}

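/* Report the rx and tx ring configuration under pg_vec_lock so the
 * layout cannot change while it is being dumped.
 */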
static int pdiag_put_rings_cfg(struct packet_sock *po, struct sk_buff *skb)
{
	int ret;

	mutex_lock(&po->pg_vec_lock);
	ret = pdiag_put_ring(&po->rx_ring, po->tp_version,
			PACKET_DIAG_RX_RING, skb);
	if (!ret)
		ret = pdiag_put_ring(&po->tx_ring, po->tp_version,
				PACKET_DIAG_TX_RING, skb);
	mutex_unlock(&po->pg_vec_lock);

	return ret;
}

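/* If the socket belongs to a fanout group, report it as a
 * PACKET_DIAG_FANOUT attribute with the group id in the low 16 bits
 * and the fanout type in the high 16 bits.
 */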
static int pdiag_put_fanout(struct packet_sock *po, struct sk_buff *nlskb)
{
	int ret = 0;

	mutex_lock(&fanout_mutex);
	if (po->fanout) {
		u32 val;

		val = (u32)po->fanout->id | ((u32)po->fanout->type << 16);
		ret = nla_put_u32(nlskb, PACKET_DIAG_FANOUT, val);
	}
	mutex_unlock(&fanout_mutex);

	return ret;
}

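/* Build one packet_diag_msg for a single socket and append the
 * optional attributes requested via req->pdiag_show. If the skb runs
 * out of room, the partially built message is trimmed and -EMSGSIZE
 * is returned so the dump can continue in a fresh skb.
 */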
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			struct packet_diag_req *req,
			bool may_report_filterinfo,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct packet_diag_msg *rp;
	struct packet_sock *po = pkt_sk(sk);

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rp), flags);
	if (!nlh)
		return -EMSGSIZE;

	rp = nlmsg_data(nlh);
	rp->pdiag_family = AF_PACKET;
	rp->pdiag_type = sk->sk_type;
	rp->pdiag_num = ntohs(po->num);
	rp->pdiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rp->pdiag_cookie);

	if ((req->pdiag_show & PACKET_SHOW_INFO) &&
			pdiag_put_info(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_INFO) &&
	    nla_put_u32(skb, PACKET_DIAG_UID,
			from_kuid_munged(user_ns, sock_i_uid(sk))))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_MCLIST) &&
			pdiag_put_mclist(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_RING_CFG) &&
			pdiag_put_rings_cfg(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_FANOUT) &&
			pdiag_put_fanout(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, PACKET_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_FILTER) &&
	    sock_diag_put_filterinfo(may_report_filterinfo, sk, skb,
				     PACKET_DIAG_FILTER))
		goto out_nlmsg_trim;

	nlmsg_end(skb, nlh);
	return 0;

out_nlmsg_trim:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

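/* Netlink dump callback: walk the per-namespace packet socket list,
 * skip the cb->args[0] sockets already reported in earlier passes,
 * and stop as soon as sk_diag_fill() runs out of space in the skb.
 */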
static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int num = 0, s_num = cb->args[0];
	struct packet_diag_req *req;
	struct net *net;
	struct sock *sk;
	bool may_report_filterinfo;

	net = sock_net(skb->sk);
	req = nlmsg_data(cb->nlh);
	may_report_filterinfo = netlink_net_capable(cb->skb, CAP_NET_ADMIN);

	mutex_lock(&net->packet.sklist_lock);
	sk_for_each(sk, &net->packet.sklist) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (num < s_num)
			goto next;

		if (sk_diag_fill(sk, skb, req,
				 may_report_filterinfo,
				 sk_user_ns(NETLINK_CB(cb->skb).sk),
				 NETLINK_CB(cb->skb).portid,
				 cb->nlh->nlmsg_seq, NLM_F_MULTI,
				 sock_i_ino(sk)) < 0)
			goto done;
next:
		num++;
	}
done:
	mutex_unlock(&net->packet.sklist_lock);
	cb->args[0] = num;

	return skb->len;
}

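/* Entry point for SOCK_DIAG_BY_FAMILY requests with sdiag_family ==
 * AF_PACKET; only NLM_F_DUMP requests are supported.
 */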
static int packet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct packet_diag_req);
	struct net *net = sock_net(skb->sk);
	struct packet_diag_req *req;

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	req = nlmsg_data(h);
	/* Make it possible to support protocol filtering later */
	if (req->sdiag_protocol)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = packet_diag_dump,
		};
		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
	} else
		return -EOPNOTSUPP;
}

static const struct sock_diag_handler packet_diag_handler = {
	.family = AF_PACKET,
	.dump = packet_diag_handler_dump,
};

static int __init packet_diag_init(void)
{
	return sock_diag_register(&packet_diag_handler);
}

static void __exit packet_diag_exit(void)
{
	sock_diag_unregister(&packet_diag_handler);
}

module_init(packet_diag_init);
module_exit(packet_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 17 /* AF_PACKET */);