// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"

/* check that QP matches packet opcode type and is in a valid state */
static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
			    struct rxe_qp *qp)
{
	unsigned int pkt_type;

	if (unlikely(!qp->valid))
		return -EINVAL;

	pkt_type = pkt->opcode & 0xe0;

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (unlikely(pkt_type != IB_OPCODE_RC))
			return -EINVAL;
		break;
	case IB_QPT_UC:
		if (unlikely(pkt_type != IB_OPCODE_UC))
			return -EINVAL;
		break;
	case IB_QPT_UD:
	case IB_QPT_GSI:
		if (unlikely(pkt_type != IB_OPCODE_UD))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&qp->state_lock);
	if (pkt->mask & RXE_REQ_MASK) {
		if (unlikely(qp_state(qp) < IB_QPS_RTR)) {
			spin_unlock_bh(&qp->state_lock);
			return -EINVAL;
		}
	} else {
		if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
			spin_unlock_bh(&qp->state_lock);
			return -EINVAL;
		}
	}
	spin_unlock_bh(&qp->state_lock);

	return 0;
}

static void set_bad_pkey_cntr(struct rxe_port *port)
{
	spin_lock_bh(&port->port_lock);
	port->attr.bad_pkey_cntr = min((u32)0xffff,
				       port->attr.bad_pkey_cntr + 1);
	spin_unlock_bh(&port->port_lock);
}

static void set_qkey_viol_cntr(struct rxe_port *port)
{
	spin_lock_bh(&port->port_lock);
	port->attr.qkey_viol_cntr = min((u32)0xffff,
				       port->attr.qkey_viol_cntr + 1);
	spin_unlock_bh(&port->port_lock);
}

static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		      u32 qpn, struct rxe_qp *qp)
{
	struct rxe_port *port = &rxe->port;
	u16 pkey = bth_pkey(pkt);

	pkt->pkey_index = 0;

	if (!pkey_match(pkey, IB_DEFAULT_PKEY_FULL)) {
		set_bad_pkey_cntr(port);
		return -EINVAL;
	}

	if (qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) {
		u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;

		if (unlikely(deth_qkey(pkt) != qkey)) {
			set_qkey_viol_cntr(port);
			return -EINVAL;
		}
	}

	return 0;
}

static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		      struct rxe_qp *qp)
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);

	if (qp_type(qp) != IB_QPT_RC && qp_type(qp) != IB_QPT_UC)
		return 0;

	if (unlikely(pkt->port_num != qp->attr.port_num))
		return -EINVAL;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct in_addr *saddr =
			&qp->pri_av.sgid_addr._sockaddr_in.sin_addr;
		struct in_addr *daddr =
			&qp->pri_av.dgid_addr._sockaddr_in.sin_addr;

		if ((ip_hdr(skb)->daddr != saddr->s_addr) ||
		    (ip_hdr(skb)->saddr != daddr->s_addr))
			return -EINVAL;

	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct in6_addr *saddr =
			&qp->pri_av.sgid_addr._sockaddr_in6.sin6_addr;
		struct in6_addr *daddr =
			&qp->pri_av.dgid_addr._sockaddr_in6.sin6_addr;

		if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr)) ||
		    memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr)))
			return -EINVAL;
	}

	return 0;
}

static int hdr_check(struct rxe_pkt_info *pkt)
{
	struct rxe_dev *rxe = pkt->rxe;
	struct rxe_port *port = &rxe->port;
	struct rxe_qp *qp = NULL;
	u32 qpn = bth_qpn(pkt);
	int index;
	int err;

	if (unlikely(bth_tver(pkt) != BTH_TVER))
		goto err1;

	if (unlikely(qpn == 0))
		goto err1;

	if (qpn != IB_MULTICAST_QPN) {
		index = (qpn == 1) ? port->qp_gsi_index : qpn;

		qp = rxe_pool_get_index(&rxe->qp_pool, index);
		if (unlikely(!qp))
			goto err1;

		err = check_type_state(rxe, pkt, qp);
		if (unlikely(err))
			goto err2;

		err = check_addr(rxe, pkt, qp);
		if (unlikely(err))
			goto err2;

		err = check_keys(rxe, pkt, qpn, qp);
		if (unlikely(err))
			goto err2;
	} else {
		if (unlikely((pkt->mask & RXE_GRH_MASK) == 0))
			goto err1;
	}

	pkt->qp = qp;
	return 0;

err2:
	rxe_put(qp);
err1:
	return -EINVAL;
}

static inline void rxe_rcv_pkt(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	if (pkt->mask & RXE_REQ_MASK)
		rxe_resp_queue_pkt(pkt->qp, skb);
	else
		rxe_comp_queue_pkt(pkt->qp, skb);
}

static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
{
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	struct rxe_mcg *mcg;
	struct rxe_mca *mca;
	struct rxe_qp *qp;
	union ib_gid dgid;
	int err;

	if (skb->protocol == htons(ETH_P_IP))
		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
				       (struct in6_addr *)&dgid);
	else if (skb->protocol == htons(ETH_P_IPV6))
		memcpy(&dgid, &ipv6_hdr(skb)->daddr, sizeof(dgid));

	/* lookup mcast group corresponding to mgid, takes a ref */
	mcg = rxe_lookup_mcg(rxe, &dgid);
	if (!mcg)
		goto drop;	/* mcast group not registered */

	spin_lock_bh(&rxe->mcg_lock);

	/* this is unreliable datagram service so we let
	 * failures to deliver a multicast packet to a
	 * single QP happen and just move on and try
	 * the rest of them on the list
	 */
	list_for_each_entry(mca, &mcg->qp_list, qp_list) {
		qp = mca->qp;

		/* validate qp for incoming packet */
		err = check_type_state(rxe, pkt, qp);
		if (err)
			continue;

		err = check_keys(rxe, pkt, bth_qpn(pkt), qp);
		if (err)
			continue;

		/* for all but the last QP create a new clone of the
		 * skb and pass to the QP. Pass the original skb to
		 * the last QP in the list.
		 */
		if (mca->qp_list.next != &mcg->qp_list) {
			struct sk_buff *cskb;
			struct rxe_pkt_info *cpkt;

			cskb = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!cskb))
				continue;

			if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) {
				kfree_skb(cskb);
				break;
			}

			cpkt = SKB_TO_PKT(cskb);
			cpkt->qp = qp;
			rxe_get(qp);
			rxe_rcv_pkt(cpkt, cskb);
		} else {
			pkt->qp = qp;
			rxe_get(qp);
			rxe_rcv_pkt(pkt, skb);
			skb = NULL;	/* mark consumed */
		}
	}

	spin_unlock_bh(&rxe->mcg_lock);

	kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);

	if (likely(!skb))
		return;

	/* This only occurs if one of the checks fails on the last
	 * QP in the list above
	 */

drop:
	kfree_skb(skb);
	ib_device_put(&rxe->ib_dev);
}

/**
 * rxe_chk_dgid - validate destination IP address
 * @rxe: rxe device that received packet
 * @skb: the received packet buffer
 *
 * Accept any loopback packets
 * Extract IP address from packet and
 * Accept if multicast packet
 * Accept if matches an SGID table entry
 */
static int rxe_chk_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
{
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	const struct ib_gid_attr *gid_attr;
	union ib_gid dgid;
	union ib_gid *pdgid;

	if (pkt->mask & RXE_LOOPBACK_MASK)
		return 0;

	if (skb->protocol == htons(ETH_P_IP)) {
		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
				       (struct in6_addr *)&dgid);
		pdgid = &dgid;
	} else {
		pdgid = (union ib_gid *)&ipv6_hdr(skb)->daddr;
	}

	if (rdma_is_multicast_addr((struct in6_addr *)pdgid))
		return 0;

	gid_attr = rdma_find_gid_by_port(&rxe->ib_dev, pdgid,
					 IB_GID_TYPE_ROCE_UDP_ENCAP,
					 1, skb->dev);
	if (IS_ERR(gid_attr))
		return PTR_ERR(gid_attr);

	rdma_put_gid_attr(gid_attr);
	return 0;
}

/* rxe_rcv is called from the interface driver */
void rxe_rcv(struct sk_buff *skb)
{
	int err;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	struct rxe_dev *rxe = pkt->rxe;

	if (unlikely(skb->len < RXE_BTH_BYTES))
		goto drop;

	if (rxe_chk_dgid(rxe, skb) < 0)
		goto drop;

	pkt->opcode = bth_opcode(pkt);
	pkt->psn = bth_psn(pkt);
	pkt->qp = NULL;
	pkt->mask |= rxe_opcode[pkt->opcode].mask;

	if (unlikely(skb->len < header_size(pkt)))
		goto drop;

	err = hdr_check(pkt);
	if (unlikely(err))
		goto drop;

	err = rxe_icrc_check(skb, pkt);
	if (unlikely(err))
		goto drop;

	rxe_counter_inc(rxe, RXE_CNT_RCVD_PKTS);

	if (unlikely(bth_qpn(pkt) == IB_MULTICAST_QPN))
		rxe_rcv_mcast_pkt(rxe, skb);
	else
		rxe_rcv_pkt(pkt, skb);

	return;

drop:
	if (pkt->qp)
		rxe_put(pkt->qp);

	kfree_skb(skb);
	ib_device_put(&rxe->ib_dev);
}
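
/*
 * Illustrative sketch only (not part of the upstream file): roughly what
 * a caller such as the UDP tunnel encap_recv handler in rxe_net.c does
 * before handing a packet to rxe_rcv() above. The function name below is
 * hypothetical and the field setup is simplified; the point is the calling
 * contract: the caller holds a reference on rxe->ib_dev (dropped on the
 * consume/drop paths above), fills in the rxe_pkt_info control block in
 * skb->cb, and strips the UDP header so the BTH is at skb->data.
 *
 *	static int example_encap_recv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
 *		struct udphdr *udph = udp_hdr(skb);
 *		struct rxe_dev *rxe;
 *
 *		rxe = rxe_get_dev_from_net(skb->dev);	// takes ib_dev ref
 *		if (!rxe)
 *			goto drop;
 *
 *		pkt->rxe = rxe;
 *		pkt->port_num = 1;
 *		pkt->hdr = (u8 *)(udph + 1);		// BTH follows UDP hdr
 *		pkt->mask = RXE_GRH_MASK;
 *		pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);
 *
 *		skb_pull(skb, sizeof(struct udphdr));	// skb->data -> BTH
 *		rxe_rcv(skb);				// consumes skb/ref
 *		return 0;
 *	drop:
 *		kfree_skb(skb);
 *		return 0;
 *	}
 */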