// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>

#include <linux/inet_diag.h>
#include <linux/sock_diag.h>

#include <net/inet_sock.h>
#include <net/raw.h>
#include <net/rawv6.h>

#ifdef pr_fmt
# undef pr_fmt
#endif

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

static struct raw_hashinfo *
raw_get_hashinfo(const struct inet_diag_req_v2 *r)
{
	if (r->sdiag_family == AF_INET) {
		return &raw_v4_hashinfo;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (r->sdiag_family == AF_INET6) {
		return &raw_v6_hashinfo;
#endif
	} else {
		return ERR_PTR(-EINVAL);
	}
}

/*
 * Since we can't rename the @pad field of inet_diag_req_v2 without
 * breaking the user API, use a helper structure to read the raw
 * protocol number that userspace stores there.
 */
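/*
 * For reference, the helper layout is roughly the following; the field
 * order is exactly what the BUILD_BUG_ON() checks at the bottom of this
 * file verify (see include/uapi/linux/inet_diag.h for the authoritative
 * definition). @sdiag_raw_protocol overlays @pad of inet_diag_req_v2:
 *
 *	struct inet_diag_req_raw {
 *		__u8	sdiag_family;
 *		__u8	sdiag_protocol;
 *		__u8	idiag_ext;
 *		__u8	sdiag_raw_protocol;
 *		__u32	idiag_states;
 *		struct inet_diag_sockid id;
 *	};
 */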
static struct sock *raw_lookup(struct net *net, struct sock *from,
			       const struct inet_diag_req_v2 *req)
{
	struct inet_diag_req_raw *r = (void *)req;
	struct sock *sk = NULL;

	if (r->sdiag_family == AF_INET)
		sk = __raw_v4_lookup(net, from, r->sdiag_raw_protocol,
				     r->id.idiag_dst[0],
				     r->id.idiag_src[0],
				     r->id.idiag_if, 0);
#if IS_ENABLED(CONFIG_IPV6)
	else
		sk = __raw_v6_lookup(net, from, r->sdiag_raw_protocol,
				     (const struct in6_addr *)r->id.idiag_src,
				     (const struct in6_addr *)r->id.idiag_dst,
				     r->id.idiag_if, 0);
#endif
	return sk;
}

static struct sock *raw_sock_get(struct net *net, const struct inet_diag_req_v2 *r)
{
	struct raw_hashinfo *hashinfo = raw_get_hashinfo(r);
	struct sock *sk = NULL, *s;
	int slot;

	if (IS_ERR(hashinfo))
		return ERR_CAST(hashinfo);

	read_lock(&hashinfo->lock);
	for (slot = 0; slot < RAW_HTABLE_SIZE; slot++) {
		sk_for_each(s, &hashinfo->ht[slot]) {
			sk = raw_lookup(net, s, r);
			if (sk) {
				/*
				 * Grab a reference and keep it until the
				 * diag message has been filled in, so the
				 * caller must sock_put() it afterwards.
				 * We can do that because we hold
				 * hashinfo->lock here.
				 */
				sock_hold(sk);
				goto out_unlock;
			}
		}
	}
out_unlock:
	read_unlock(&hashinfo->lock);

	return sk ? sk : ERR_PTR(-ENOENT);
}

static int raw_diag_dump_one(struct sk_buff *in_skb,
			     const struct nlmsghdr *nlh,
			     const struct inet_diag_req_v2 *r)
{
	struct net *net = sock_net(in_skb->sk);
	struct sk_buff *rep;
	struct sock *sk;
	int err;

	sk = raw_sock_get(net, r);
	if (IS_ERR(sk))
		return PTR_ERR(sk);

	rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) +
			inet_diag_msg_attrs_size() +
			nla_total_size(sizeof(struct inet_diag_meminfo)) + 64,
			GFP_KERNEL);
	if (!rep) {
		sock_put(sk);
		return -ENOMEM;
	}

	err = inet_sk_diag_fill(sk, NULL, rep, r,
				sk_user_ns(NETLINK_CB(in_skb).sk),
				NETLINK_CB(in_skb).portid,
				nlh->nlmsg_seq, 0, nlh,
				netlink_net_capable(in_skb, CAP_NET_ADMIN));
	sock_put(sk);

	if (err < 0) {
		kfree_skb(rep);
		return err;
	}

	err = netlink_unicast(net->diag_nlsk, rep,
			      NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;
	return err;
}

static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
			struct netlink_callback *cb,
			const struct inet_diag_req_v2 *r,
			struct nlattr *bc, bool net_admin)
{
	if (!inet_diag_bc_sk(bc, sk))
		return 0;

	return inet_sk_diag_fill(sk, NULL, skb, r,
				 sk_user_ns(NETLINK_CB(cb->skb).sk),
				 NETLINK_CB(cb->skb).portid,
				 cb->nlh->nlmsg_seq, NLM_F_MULTI,
				 cb->nlh, net_admin);
}

static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
	struct raw_hashinfo *hashinfo = raw_get_hashinfo(r);
	struct net *net = sock_net(skb->sk);
	int num, s_num, slot, s_slot;
	struct sock *sk = NULL;

	if (IS_ERR(hashinfo))
		return;

	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	read_lock(&hashinfo->lock);
	for (slot = s_slot; slot < RAW_HTABLE_SIZE; s_num = 0, slot++) {
		num = 0;

		sk_for_each(sk, &hashinfo->ht[slot]) {
			struct inet_sock *inet = inet_sk(sk);

			if (!net_eq(sock_net(sk), net))
				continue;
			if (num < s_num)
				goto next;
			if (sk->sk_family != r->sdiag_family)
				goto next;
			if (r->id.idiag_sport != inet->inet_sport &&
			    r->id.idiag_sport)
				goto next;
			if (r->id.idiag_dport != inet->inet_dport &&
			    r->id.idiag_dport)
				goto next;
			if (sk_diag_dump(sk, skb, cb, r, bc, net_admin) < 0)
				goto out_unlock;
next:
			num++;
		}
	}

out_unlock:
	read_unlock(&hashinfo->lock);

	cb->args[0] = slot;
	cb->args[1] = num;
}

static void raw_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
			      void *info)
{
	r->idiag_rqueue = sk_rmem_alloc_get(sk);
	r->idiag_wqueue = sk_wmem_alloc_get(sk);
}

#ifdef CONFIG_INET_DIAG_DESTROY
static int raw_diag_destroy(struct sk_buff *in_skb,
			    const struct inet_diag_req_v2 *r)
{
	struct net *net = sock_net(in_skb->sk);
	struct sock *sk;
	int err;

	sk = raw_sock_get(net, r);
	if (IS_ERR(sk))
		return PTR_ERR(sk);
	err = sock_diag_destroy(sk, ECONNABORTED);
	sock_put(sk);
	return err;
}
#endif

static const struct inet_diag_handler raw_diag_handler = {
	.dump = raw_diag_dump,
	.dump_one = raw_diag_dump_one,
	.idiag_get_info = raw_diag_get_info,
	.idiag_type = IPPROTO_RAW,
	.idiag_info_size = 0,
#ifdef CONFIG_INET_DIAG_DESTROY
	.destroy = raw_diag_destroy,
#endif
};

static void __always_unused __check_inet_diag_req_raw(void)
{
	/*
	 * Make sure the two structures are identical,
	 * except for the @pad field.
	 */
#define __offset_mismatch(m1, m2) \
	(offsetof(struct inet_diag_req_v2, m1) != \
	 offsetof(struct inet_diag_req_raw, m2))

	BUILD_BUG_ON(sizeof(struct inet_diag_req_v2) !=
		     sizeof(struct inet_diag_req_raw));
	BUILD_BUG_ON(__offset_mismatch(sdiag_family, sdiag_family));
	BUILD_BUG_ON(__offset_mismatch(sdiag_protocol, sdiag_protocol));
	BUILD_BUG_ON(__offset_mismatch(idiag_ext, idiag_ext));
	BUILD_BUG_ON(__offset_mismatch(pad, sdiag_raw_protocol));
	BUILD_BUG_ON(__offset_mismatch(idiag_states, idiag_states));
	BUILD_BUG_ON(__offset_mismatch(id, id));
#undef __offset_mismatch
}

static int __init raw_diag_init(void)
{
	return inet_diag_register(&raw_diag_handler);
}

static void __exit raw_diag_exit(void)
{
	inet_diag_unregister(&raw_diag_handler);
}

module_init(raw_diag_init);
module_exit(raw_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-255 /* AF_INET - IPPROTO_RAW */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10-255 /* AF_INET6 - IPPROTO_RAW */);
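/*
 * Illustrative userspace sketch (not part of this module): a minimal
 * request that dumps all IPv4 raw sockets through the handler registered
 * above. It assumes the usual sock_diag request flow; the reply-parsing
 * loop is omitted and error handling is kept minimal.
 *
 *	#include <linux/inet_diag.h>
 *	#include <linux/netlink.h>
 *	#include <linux/sock_diag.h>
 *	#include <netinet/in.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	static int dump_raw_sockets(void)
 *	{
 *		struct sockaddr_nl nladdr = { .nl_family = AF_NETLINK };
 *		struct {
 *			struct nlmsghdr nlh;
 *			struct inet_diag_req_v2 req;
 *		} msg;
 *		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
 *
 *		if (fd < 0)
 *			return -1;
 *
 *		memset(&msg, 0, sizeof(msg));
 *		msg.nlh.nlmsg_len = sizeof(msg);
 *		msg.nlh.nlmsg_type = SOCK_DIAG_BY_FAMILY;
 *		msg.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
 *		msg.req.sdiag_family = AF_INET;
 *		msg.req.sdiag_protocol = IPPROTO_RAW;	// selects this handler
 *		msg.req.idiag_states = -1;		// any socket state
 *
 *		sendto(fd, &msg, sizeof(msg), 0,
 *		       (struct sockaddr *)&nladdr, sizeof(nladdr));
 *		// ... recv() loop parsing struct inet_diag_msg replies ...
 *		close(fd);
 *		return 0;
 *	}
 */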