// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Monitoring SMC transport protocol sockets
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sock_diag.h>
#include <linux/inet_diag.h>
#include <linux/smc_diag.h>
#include <net/netlink.h>
#include <net/smc.h>

#include "smc.h"
#include "smc_core.h"

static void smc_gid_be16_convert(__u8 *buf, u8 *gid_raw)
{
	sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
		be16_to_cpu(((__be16 *)gid_raw)[0]),
		be16_to_cpu(((__be16 *)gid_raw)[1]),
		be16_to_cpu(((__be16 *)gid_raw)[2]),
		be16_to_cpu(((__be16 *)gid_raw)[3]),
		be16_to_cpu(((__be16 *)gid_raw)[4]),
		be16_to_cpu(((__be16 *)gid_raw)[5]),
		be16_to_cpu(((__be16 *)gid_raw)[6]),
		be16_to_cpu(((__be16 *)gid_raw)[7]));
}

static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	memset(r, 0, sizeof(*r));
	r->diag_family = sk->sk_family;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);
	if (!smc->clcsock)
		return;
	r->id.idiag_sport = htons(smc->clcsock->sk->sk_num);
	r->id.idiag_dport = smc->clcsock->sk->sk_dport;
	r->id.idiag_if = smc->clcsock->sk->sk_bound_dev_if;
	if (sk->sk_protocol == SMCPROTO_SMC) {
		r->id.idiag_src[0] = smc->clcsock->sk->sk_rcv_saddr;
		r->id.idiag_dst[0] = smc->clcsock->sk->sk_daddr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (sk->sk_protocol == SMCPROTO_SMC6) {
		memcpy(&r->id.idiag_src, &smc->clcsock->sk->sk_v6_rcv_saddr,
		       sizeof(smc->clcsock->sk->sk_v6_rcv_saddr));
		memcpy(&r->id.idiag_dst, &smc->clcsock->sk->sk_v6_daddr,
		       sizeof(smc->clcsock->sk->sk_v6_daddr));
#endif
	}
}

static int smc_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
				   struct smc_diag_msg *r,
				   struct user_namespace *user_ns)
{
	if (nla_put_u8(skb, SMC_DIAG_SHUTDOWN, sk->sk_shutdown))
		return 1;

	r->diag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
	r->diag_inode = sock_i_ino(sk);
	return 0;
}
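/* Emit one SOCK_DIAG_BY_FAMILY record for @sk.  The base smc_diag_msg plus
 * the SMC_DIAG_SHUTDOWN and SMC_DIAG_FALLBACK attributes are always filled
 * in; SMC_DIAG_CONNINFO, SMC_DIAG_LGRINFO and SMC_DIAG_DMBINFO are appended
 * only when the corresponding bit is set in req->diag_ext and the socket is
 * in a matching state (active connection, SMC-R link group or SMC-D link
 * group, respectively).
 */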
static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
			   struct netlink_callback *cb,
			   const struct smc_diag_req *req,
			   struct nlattr *bc)
{
	struct smc_sock *smc = smc_sk(sk);
	struct smc_diag_fallback fallback;
	struct user_namespace *user_ns;
	struct smc_diag_msg *r;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*r), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	smc_diag_msg_common_fill(r, sk);
	r->diag_state = sk->sk_state;
	if (smc->use_fallback)
		r->diag_mode = SMC_DIAG_MODE_FALLBACK_TCP;
	else if (smc->conn.lgr && smc->conn.lgr->is_smcd)
		r->diag_mode = SMC_DIAG_MODE_SMCD;
	else
		r->diag_mode = SMC_DIAG_MODE_SMCR;
	user_ns = sk_user_ns(NETLINK_CB(cb->skb).sk);
	if (smc_diag_msg_attrs_fill(sk, skb, r, user_ns))
		goto errout;

	fallback.reason = smc->fallback_rsn;
	fallback.peer_diagnosis = smc->peer_diagnosis;
	if (nla_put(skb, SMC_DIAG_FALLBACK, sizeof(fallback), &fallback) < 0)
		goto errout;

	if ((req->diag_ext & (1 << (SMC_DIAG_CONNINFO - 1))) &&
	    smc->conn.alert_token_local) {
		struct smc_connection *conn = &smc->conn;
		struct smc_diag_conninfo cinfo = {
			.token = conn->alert_token_local,
			.sndbuf_size = conn->sndbuf_desc ?
				conn->sndbuf_desc->len : 0,
			.rmbe_size = conn->rmb_desc ? conn->rmb_desc->len : 0,
			.peer_rmbe_size = conn->peer_rmbe_size,

			.rx_prod.wrap = conn->local_rx_ctrl.prod.wrap,
			.rx_prod.count = conn->local_rx_ctrl.prod.count,
			.rx_cons.wrap = conn->local_rx_ctrl.cons.wrap,
			.rx_cons.count = conn->local_rx_ctrl.cons.count,

			.tx_prod.wrap = conn->local_tx_ctrl.prod.wrap,
			.tx_prod.count = conn->local_tx_ctrl.prod.count,
			.tx_cons.wrap = conn->local_tx_ctrl.cons.wrap,
			.tx_cons.count = conn->local_tx_ctrl.cons.count,

			.tx_prod_flags =
				*(u8 *)&conn->local_tx_ctrl.prod_flags,
			.tx_conn_state_flags =
				*(u8 *)&conn->local_tx_ctrl.conn_state_flags,
			.rx_prod_flags = *(u8 *)&conn->local_rx_ctrl.prod_flags,
			.rx_conn_state_flags =
				*(u8 *)&conn->local_rx_ctrl.conn_state_flags,

			.tx_prep.wrap = conn->tx_curs_prep.wrap,
			.tx_prep.count = conn->tx_curs_prep.count,
			.tx_sent.wrap = conn->tx_curs_sent.wrap,
			.tx_sent.count = conn->tx_curs_sent.count,
			.tx_fin.wrap = conn->tx_curs_fin.wrap,
			.tx_fin.count = conn->tx_curs_fin.count,
		};

		if (nla_put(skb, SMC_DIAG_CONNINFO, sizeof(cinfo), &cinfo) < 0)
			goto errout;
	}

	if (smc->conn.lgr && !smc->conn.lgr->is_smcd &&
	    (req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) &&
	    !list_empty(&smc->conn.lgr->list)) {
		struct smc_diag_lgrinfo linfo = {
			.role = smc->conn.lgr->role,
			.lnk[0].ibport = smc->conn.lgr->lnk[0].ibport,
			.lnk[0].link_id = smc->conn.lgr->lnk[0].link_id,
		};

		memcpy(linfo.lnk[0].ibname,
		       smc->conn.lgr->lnk[0].smcibdev->ibdev->name,
		       sizeof(smc->conn.lgr->lnk[0].smcibdev->ibdev->name));
		smc_gid_be16_convert(linfo.lnk[0].gid,
				     smc->conn.lgr->lnk[0].gid);
		smc_gid_be16_convert(linfo.lnk[0].peer_gid,
				     smc->conn.lgr->lnk[0].peer_gid);

		if (nla_put(skb, SMC_DIAG_LGRINFO, sizeof(linfo), &linfo) < 0)
			goto errout;
	}
	if (smc->conn.lgr && smc->conn.lgr->is_smcd &&
	    (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
	    !list_empty(&smc->conn.lgr->list)) {
		struct smc_connection *conn = &smc->conn;
		struct smcd_diag_dmbinfo dinfo;

		memset(&dinfo, 0, sizeof(dinfo));

		dinfo.linkid = *((u32 *)conn->lgr->id);
		dinfo.peer_gid = conn->lgr->peer_gid;
		dinfo.my_gid = conn->lgr->smcd->local_gid;
		dinfo.token = conn->rmb_desc->token;
		dinfo.peer_token = conn->peer_token;

		if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0)
			goto errout;
	}

	nlmsg_end(skb, nlh);
	return 0;

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int smc_diag_dump_proto(struct proto *prot, struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *bc = NULL;
	struct hlist_head *head;
	struct sock *sk;
	int rc = 0;

	read_lock(&prot->h.smc_hash->lock);
	head = &prot->h.smc_hash->ht;
	if (hlist_empty(head))
		goto out;

	sk_for_each(sk, head) {
		if (!net_eq(sock_net(sk), net))
			continue;
		rc = __smc_diag_dump(sk, skb, cb, nlmsg_data(cb->nlh), bc);
		if (rc)
			break;
	}

out:
	read_unlock(&prot->h.smc_hash->lock);
	return rc;
}
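/* Dump every SMC socket of the requesting netns: walk the SMCPROTO_SMC (IPv4)
 * hash table first and, if that completed without error, the SMCPROTO_SMC6
 * (IPv6) one.  A non-zero return (typically -EMSGSIZE once the skb is full)
 * is handed back to the netlink dump machinery.
 */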
static int smc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int rc = 0;

	rc = smc_diag_dump_proto(&smc_proto, skb, cb);
	if (!rc)
		rc = smc_diag_dump_proto(&smc_proto6, skb, cb);
	return rc;
}

static int smc_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	struct net *net = sock_net(skb->sk);

	if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY &&
	    h->nlmsg_flags & NLM_F_DUMP) {
		{
			struct netlink_dump_control c = {
				.dump = smc_diag_dump,
				.min_dump_alloc = SKB_WITH_OVERHEAD(32768),
			};
			return netlink_dump_start(net->diag_nlsk, skb, h, &c);
		}
	}
	return 0;
}

static const struct sock_diag_handler smc_diag_handler = {
	.family = AF_SMC,
	.dump = smc_diag_handler_dump,
};

static int __init smc_diag_init(void)
{
	return sock_diag_register(&smc_diag_handler);
}

static void __exit smc_diag_exit(void)
{
	sock_diag_unregister(&smc_diag_handler);
}

module_init(smc_diag_init);
module_exit(smc_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 43 /* AF_SMC */);
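
/* Illustrative userspace sketch (not part of this module): one way a client
 * could ask the handler above for a socket dump over NETLINK_SOCK_DIAG.  The
 * request layout comes from <linux/smc_diag.h>; reply parsing and error
 * handling are trimmed down, so treat this as a sketch rather than a
 * reference client (tools such as smcss implement the full protocol).  Each
 * reply message carries a struct smc_diag_msg, optionally followed by the
 * attributes requested via diag_ext.  The AF_SMC fallback define (value 43,
 * matching the module alias above) is only needed on libcs that do not
 * provide it.
 *
 *	#include <linux/netlink.h>
 *	#include <linux/sock_diag.h>
 *	#include <linux/smc_diag.h>
 *	#include <sys/socket.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	#ifndef AF_SMC
 *	#define AF_SMC 43
 *	#endif
 *
 *	int main(void)
 *	{
 *		struct {
 *			struct nlmsghdr nlh;
 *			struct smc_diag_req req;
 *		} msg;
 *		char buf[8192];
 *		ssize_t len;
 *		int fd;
 *
 *		fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
 *		if (fd < 0)
 *			return 1;
 *
 *		memset(&msg, 0, sizeof(msg));
 *		msg.nlh.nlmsg_len = sizeof(msg);
 *		msg.nlh.nlmsg_type = SOCK_DIAG_BY_FAMILY;
 *		msg.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
 *		msg.req.diag_family = AF_SMC;
 *		msg.req.diag_ext = 1 << (SMC_DIAG_CONNINFO - 1);
 *
 *		if (send(fd, &msg, sizeof(msg), 0) < 0)
 *			return 1;
 *
 *		while ((len = recv(fd, buf, sizeof(buf), 0)) > 0) {
 *			struct nlmsghdr *h = (struct nlmsghdr *)buf;
 *
 *			for (; NLMSG_OK(h, len); h = NLMSG_NEXT(h, len)) {
 *				if (h->nlmsg_type == NLMSG_DONE ||
 *				    h->nlmsg_type == NLMSG_ERROR) {
 *					close(fd);
 *					return 0;
 *				}
 *			}
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */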