/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Monitoring SMC transport protocol sockets
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sock_diag.h>
#include <linux/inet_diag.h>
#include <linux/smc_diag.h>
#include <net/netlink.h>
#include <net/smc.h>

#include "smc.h"
#include "smc_core.h"

/* render a raw 16-byte IB GID as eight colon-separated 16-bit groups */
static void smc_gid_be16_convert(__u8 *buf, u8 *gid_raw)
{
	sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
		be16_to_cpu(((__be16 *)gid_raw)[0]),
		be16_to_cpu(((__be16 *)gid_raw)[1]),
		be16_to_cpu(((__be16 *)gid_raw)[2]),
		be16_to_cpu(((__be16 *)gid_raw)[3]),
		be16_to_cpu(((__be16 *)gid_raw)[4]),
		be16_to_cpu(((__be16 *)gid_raw)[5]),
		be16_to_cpu(((__be16 *)gid_raw)[6]),
		be16_to_cpu(((__be16 *)gid_raw)[7]));
}

/* fill the inet_diag-style socket id (ports, addresses, interface, cookie)
 * from the internal CLC TCP socket of the SMC socket
 */
static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	if (!smc->clcsock)
		return;
	r->id.idiag_sport = htons(smc->clcsock->sk->sk_num);
	r->id.idiag_dport = smc->clcsock->sk->sk_dport;
	r->id.idiag_if = smc->clcsock->sk->sk_bound_dev_if;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);
	if (sk->sk_protocol == SMCPROTO_SMC) {
		r->diag_family = PF_INET;
		memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
		memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
		r->id.idiag_src[0] = smc->clcsock->sk->sk_rcv_saddr;
		r->id.idiag_dst[0] = smc->clcsock->sk->sk_daddr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (sk->sk_protocol == SMCPROTO_SMC6) {
		r->diag_family = PF_INET6;
		memcpy(&r->id.idiag_src, &smc->clcsock->sk->sk_v6_rcv_saddr,
		       sizeof(smc->clcsock->sk->sk_v6_rcv_saddr));
		memcpy(&r->id.idiag_dst, &smc->clcsock->sk->sk_v6_daddr,
		       sizeof(smc->clcsock->sk->sk_v6_daddr));
#endif
	}
}

static int smc_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
				   struct smc_diag_msg *r,
				   struct user_namespace *user_ns)
{
	if (nla_put_u8(skb, SMC_DIAG_SHUTDOWN, sk->sk_shutdown))
		return 1;

	r->diag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
	r->diag_inode = sock_i_ino(sk);
	return 0;
}

/* emit one netlink record for @sk, plus the optional CONNINFO and LGRINFO
 * attributes if they were requested and are available
 */
static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
			   struct netlink_callback *cb,
			   const struct smc_diag_req *req,
			   struct nlattr *bc)
{
	struct smc_sock *smc = smc_sk(sk);
	struct user_namespace *user_ns;
	struct smc_diag_msg *r;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*r), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	smc_diag_msg_common_fill(r, sk);
	r->diag_state = sk->sk_state;
	r->diag_fallback = smc->use_fallback;
	user_ns = sk_user_ns(NETLINK_CB(cb->skb).sk);
	if (smc_diag_msg_attrs_fill(sk, skb, r, user_ns))
		goto errout;

	if ((req->diag_ext & (1 << (SMC_DIAG_CONNINFO - 1))) &&
	    smc->conn.alert_token_local) {
		struct smc_connection *conn = &smc->conn;
		struct smc_diag_conninfo cinfo = {
			.token = conn->alert_token_local,
			.sndbuf_size = conn->sndbuf_desc ?
				conn->sndbuf_desc->len : 0,
			.rmbe_size = conn->rmb_desc ? conn->rmb_desc->len : 0,
			.peer_rmbe_size = conn->peer_rmbe_size,

			.rx_prod.wrap = conn->local_rx_ctrl.prod.wrap,
			.rx_prod.count = conn->local_rx_ctrl.prod.count,
			.rx_cons.wrap = conn->local_rx_ctrl.cons.wrap,
			.rx_cons.count = conn->local_rx_ctrl.cons.count,

			.tx_prod.wrap = conn->local_tx_ctrl.prod.wrap,
			.tx_prod.count = conn->local_tx_ctrl.prod.count,
			.tx_cons.wrap = conn->local_tx_ctrl.cons.wrap,
			.tx_cons.count = conn->local_tx_ctrl.cons.count,

			.tx_prod_flags =
				*(u8 *)&conn->local_tx_ctrl.prod_flags,
			.tx_conn_state_flags =
				*(u8 *)&conn->local_tx_ctrl.conn_state_flags,
			.rx_prod_flags = *(u8 *)&conn->local_rx_ctrl.prod_flags,
			.rx_conn_state_flags =
				*(u8 *)&conn->local_rx_ctrl.conn_state_flags,

			.tx_prep.wrap = conn->tx_curs_prep.wrap,
			.tx_prep.count = conn->tx_curs_prep.count,
			.tx_sent.wrap = conn->tx_curs_sent.wrap,
			.tx_sent.count = conn->tx_curs_sent.count,
			.tx_fin.wrap = conn->tx_curs_fin.wrap,
			.tx_fin.count = conn->tx_curs_fin.count,
		};

		if (nla_put(skb, SMC_DIAG_CONNINFO, sizeof(cinfo), &cinfo) < 0)
			goto errout;
	}

	if ((req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) && smc->conn.lgr &&
	    !list_empty(&smc->conn.lgr->list)) {
		struct smc_diag_lgrinfo linfo = {
			.role = smc->conn.lgr->role,
			.lnk[0].ibport = smc->conn.lgr->lnk[0].ibport,
			.lnk[0].link_id = smc->conn.lgr->lnk[0].link_id,
		};

		memcpy(linfo.lnk[0].ibname,
		       smc->conn.lgr->lnk[0].smcibdev->ibdev->name,
		       sizeof(smc->conn.lgr->lnk[0].smcibdev->ibdev->name));
		smc_gid_be16_convert(linfo.lnk[0].gid,
				     smc->conn.lgr->lnk[0].gid.raw);
		smc_gid_be16_convert(linfo.lnk[0].peer_gid,
				     smc->conn.lgr->lnk[0].peer_gid);

		if (nla_put(skb, SMC_DIAG_LGRINFO, sizeof(linfo), &linfo) < 0)
			goto errout;
	}

	nlmsg_end(skb, nlh);
	return 0;

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/* walk one protocol's socket hash under its read lock and dump every
 * socket that belongs to the requesting network namespace
 */
static int smc_diag_dump_proto(struct proto *prot, struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *bc = NULL;
	struct hlist_head *head;
	struct sock *sk;
	int rc = 0;

	read_lock(&prot->h.smc_hash->lock);
	head = &prot->h.smc_hash->ht;
	if (hlist_empty(head))
		goto out;

	sk_for_each(sk, head) {
		if (!net_eq(sock_net(sk), net))
			continue;
		rc = __smc_diag_dump(sk, skb, cb, nlmsg_data(cb->nlh), bc);
		if (rc)
			break;
	}

out:
	read_unlock(&prot->h.smc_hash->lock);
	return rc;
}

static int smc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int rc = 0;

	rc = smc_diag_dump_proto(&smc_proto, skb, cb);
	if (!rc)
		rc = smc_diag_dump_proto(&smc_proto6, skb, cb);
	return rc;
}

static int smc_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	struct net *net = sock_net(skb->sk);

	if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY &&
	    h->nlmsg_flags & NLM_F_DUMP) {
		{
			struct netlink_dump_control c = {
				.dump = smc_diag_dump,
				.min_dump_alloc = SKB_WITH_OVERHEAD(32768),
			};
			return netlink_dump_start(net->diag_nlsk, skb, h, &c);
		}
	}
	return 0;
}

static const struct sock_diag_handler smc_diag_handler = {
	.family = AF_SMC,
	.dump = smc_diag_handler_dump,
};

static int __init smc_diag_init(void)
{
	return sock_diag_register(&smc_diag_handler);
}

static void __exit smc_diag_exit(void)
{
	sock_diag_unregister(&smc_diag_handler);
}

module_init(smc_diag_init);
module_exit(smc_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 43 /* AF_SMC */);
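
/*
 * Illustrative only, not part of this module: a minimal userspace sketch of
 * how the records produced above can be retrieved.  It sends a
 * SOCK_DIAG_BY_FAMILY dump request for AF_SMC on a NETLINK_SOCK_DIAG socket
 * and walks the NLM_F_MULTI replies, each of which begins with a
 * struct smc_diag_msg.  It assumes the uapi headers <linux/sock_diag.h> and
 * <linux/smc_diag.h> are installed; the AF_SMC fallback define and the error
 * handling are simplified.  Kept out of the build with "#if 0" because it is
 * user-space code shown here only as an example.
 */
#if 0
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/smc_diag.h>
#include <sys/socket.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#ifndef AF_SMC
#define AF_SMC 43
#endif

int main(void)
{
	struct sockaddr_nl nladdr = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		struct smc_diag_req req;
	} msg;
	char buf[8192];
	ssize_t len;
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
	if (fd < 0)
		return 1;

	/* build the dump request; diag_ext asks for the CONNINFO attribute */
	memset(&msg, 0, sizeof(msg));
	msg.nlh.nlmsg_len = sizeof(msg);
	msg.nlh.nlmsg_type = SOCK_DIAG_BY_FAMILY;
	msg.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	msg.req.diag_family = AF_SMC;
	msg.req.diag_ext = 1 << (SMC_DIAG_CONNINFO - 1);

	if (sendto(fd, &msg, sizeof(msg), 0,
		   (struct sockaddr *)&nladdr, sizeof(nladdr)) < 0)
		return 1;

	/* read reply batches until NLMSG_DONE terminates the dump */
	while ((len = recv(fd, buf, sizeof(buf), 0)) > 0) {
		struct nlmsghdr *h = (struct nlmsghdr *)buf;

		for (; NLMSG_OK(h, len); h = NLMSG_NEXT(h, len)) {
			struct smc_diag_msg *r;

			if (h->nlmsg_type == NLMSG_DONE ||
			    h->nlmsg_type == NLMSG_ERROR) {
				close(fd);
				return 0;
			}
			r = NLMSG_DATA(h);
			printf("state %u fallback %u uid %u inode %llu\n",
			       r->diag_state, r->diag_fallback, r->diag_uid,
			       (unsigned long long)r->diag_inode);
		}
	}
	close(fd);
	return 0;
}
#endif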