// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/sock_diag.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/packet_diag.h>
#include <linux/percpu.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "internal.h"

static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
{
	struct packet_diag_info pinfo;

	pinfo.pdi_index = po->ifindex;
	pinfo.pdi_version = po->tp_version;
	pinfo.pdi_reserve = po->tp_reserve;
	pinfo.pdi_copy_thresh = po->copy_thresh;
	pinfo.pdi_tstamp = READ_ONCE(po->tp_tstamp);

	pinfo.pdi_flags = 0;
	if (packet_sock_flag(po, PACKET_SOCK_RUNNING))
		pinfo.pdi_flags |= PDI_RUNNING;
	if (packet_sock_flag(po, PACKET_SOCK_AUXDATA))
		pinfo.pdi_flags |= PDI_AUXDATA;
	if (packet_sock_flag(po, PACKET_SOCK_ORIGDEV))
		pinfo.pdi_flags |= PDI_ORIGDEV;
	if (READ_ONCE(po->vnet_hdr_sz))
		pinfo.pdi_flags |= PDI_VNETHDR;
	if (packet_sock_flag(po, PACKET_SOCK_TP_LOSS))
		pinfo.pdi_flags |= PDI_LOSS;

	return nla_put(nlskb, PACKET_DIAG_INFO, sizeof(pinfo), &pinfo);
}

/* Dump the socket's multicast memberships as a flat array of
 * packet_diag_mclist entries nested under PACKET_DIAG_MCLIST.
 */
static int pdiag_put_mclist(const struct packet_sock *po, struct sk_buff *nlskb)
{
	struct nlattr *mca;
	struct packet_mclist *ml;

	mca = nla_nest_start_noflag(nlskb, PACKET_DIAG_MCLIST);
	if (!mca)
		return -EMSGSIZE;

	rtnl_lock();
	for (ml = po->mclist; ml; ml = ml->next) {
		struct packet_diag_mclist *dml;

		dml = nla_reserve_nohdr(nlskb, sizeof(*dml));
		if (!dml) {
			rtnl_unlock();
			nla_nest_cancel(nlskb, mca);
			return -EMSGSIZE;
		}

		dml->pdmc_index = ml->ifindex;
		dml->pdmc_type = ml->type;
		dml->pdmc_alen = ml->alen;
		dml->pdmc_count = ml->count;
		BUILD_BUG_ON(sizeof(dml->pdmc_addr) != sizeof(ml->addr));
		memcpy(dml->pdmc_addr, ml->addr, sizeof(ml->addr));
	}

	rtnl_unlock();
	nla_nest_end(nlskb, mca);

	return 0;
}

static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type,
			  struct sk_buff *nlskb)
{
	struct packet_diag_ring pdr;

	if (!ring->pg_vec)
		return 0;

	pdr.pdr_block_size = ring->pg_vec_pages << PAGE_SHIFT;
	pdr.pdr_block_nr = ring->pg_vec_len;
	pdr.pdr_frame_size = ring->frame_size;
	pdr.pdr_frame_nr = ring->frame_max + 1;

	/* The remaining fields are only meaningful for TPACKET_V3 rings */
	if (ver > TPACKET_V2) {
		pdr.pdr_retire_tmo = ring->prb_bdqc.retire_blk_tov;
		pdr.pdr_sizeof_priv = ring->prb_bdqc.blk_sizeof_priv;
		pdr.pdr_features = ring->prb_bdqc.feature_req_word;
	} else {
		pdr.pdr_retire_tmo = 0;
		pdr.pdr_sizeof_priv = 0;
		pdr.pdr_features = 0;
	}

	return nla_put(nlskb, nl_type, sizeof(pdr), &pdr);
}

static int pdiag_put_rings_cfg(struct packet_sock *po, struct sk_buff *skb)
{
	int ret;

	mutex_lock(&po->pg_vec_lock);
	ret = pdiag_put_ring(&po->rx_ring, po->tp_version,
			     PACKET_DIAG_RX_RING, skb);
	if (!ret)
		ret = pdiag_put_ring(&po->tx_ring, po->tp_version,
				     PACKET_DIAG_TX_RING, skb);
	mutex_unlock(&po->pg_vec_lock);

	return ret;
}

static int pdiag_put_fanout(struct packet_sock *po, struct sk_buff *nlskb)
{
	int ret = 0;

	mutex_lock(&fanout_mutex);
	if (po->fanout) {
		u32 val;

		/* Fanout group id in the low 16 bits, fanout mode in the high 16 bits */
		val = (u32)po->fanout->id | ((u32)po->fanout->type << 16);
		ret = nla_put_u32(nlskb, PACKET_DIAG_FANOUT, val);
	}
	mutex_unlock(&fanout_mutex);

	return ret;
}

/* Fill one SOCK_DIAG_BY_FAMILY reply for a packet socket, attaching
 * the optional attribute groups requested via pdiag_show.
 */
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			struct packet_diag_req *req,
			bool may_report_filterinfo,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct packet_diag_msg *rp;
	struct packet_sock *po = pkt_sk(sk);

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rp), flags);
	if (!nlh)
		return -EMSGSIZE;

	rp = nlmsg_data(nlh);
	rp->pdiag_family = AF_PACKET;
	rp->pdiag_type = sk->sk_type;
	rp->pdiag_num = ntohs(READ_ONCE(po->num));
	rp->pdiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rp->pdiag_cookie);

	if ((req->pdiag_show & PACKET_SHOW_INFO) &&
	    pdiag_put_info(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_INFO) &&
	    nla_put_u32(skb, PACKET_DIAG_UID,
			from_kuid_munged(user_ns, sock_i_uid(sk))))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_MCLIST) &&
	    pdiag_put_mclist(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_RING_CFG) &&
	    pdiag_put_rings_cfg(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_FANOUT) &&
	    pdiag_put_fanout(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, PACKET_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_FILTER) &&
	    sock_diag_put_filterinfo(may_report_filterinfo, sk, skb,
				     PACKET_DIAG_FILTER))
		goto out_nlmsg_trim;

	nlmsg_end(skb, nlh);
	return 0;

out_nlmsg_trim:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/* Walk the per-netns packet socket list, resuming from cb->args[0]. */
static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int num = 0, s_num = cb->args[0];
	struct packet_diag_req *req;
	struct net *net;
	struct sock *sk;
	bool may_report_filterinfo;

	net = sock_net(skb->sk);
	req = nlmsg_data(cb->nlh);
	may_report_filterinfo = netlink_net_capable(cb->skb, CAP_NET_ADMIN);

	mutex_lock(&net->packet.sklist_lock);
	sk_for_each(sk, &net->packet.sklist) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (num < s_num)
			goto next;

		if (sk_diag_fill(sk, skb, req,
				 may_report_filterinfo,
				 sk_user_ns(NETLINK_CB(cb->skb).sk),
				 NETLINK_CB(cb->skb).portid,
				 cb->nlh->nlmsg_seq, NLM_F_MULTI,
				 sock_i_ino(sk)) < 0)
			goto done;
next:
		num++;
	}
done:
	mutex_unlock(&net->packet.sklist_lock);
	cb->args[0] = num;

	return skb->len;
}

static int packet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct packet_diag_req);
	struct net *net = sock_net(skb->sk);
	struct packet_diag_req *req;

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	req = nlmsg_data(h);
	/* Make it possible to support protocol filtering later */
	if (req->sdiag_protocol)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = packet_diag_dump,
		};
		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
	} else
		return -EOPNOTSUPP;
}

static const struct sock_diag_handler packet_diag_handler = {
	.owner = THIS_MODULE,
	.family = AF_PACKET,
	.dump = packet_diag_handler_dump,
};

static int __init packet_diag_init(void)
{
	return sock_diag_register(&packet_diag_handler);
}

static void __exit packet_diag_exit(void)
{
	sock_diag_unregister(&packet_diag_handler);
}

module_init(packet_diag_init);
module_exit(packet_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 17 /* AF_PACKET */);