/* This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) adapters.
 * Copyright (C) 2012-2015 Emulex. All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 * www.emulex.com
 *
 * This software is available to you under a choice of one of two licenses.
 * You may choose to be licensed under the terms of the GNU General Public
 * License (GPL) Version 2, available from the file COPYING in the main
 * directory of this source tree, or the BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <net/neighbour.h>
#include <net/netevent.h>

#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>

#include "ocrdma.h"
#include "ocrdma_verbs.h"
#include "ocrdma_ah.h"
#include "ocrdma_hw.h"
#include "ocrdma_stats.h"

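/* The 802.1Q tag control info carries the PCP (priority) field in bits
 * 15:13, so the service level is shifted by 13 (0xD) before it is merged
 * into the VLAN tag.
 */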
#define OCRDMA_VID_PCP_SHIFT	0xD

static u16 ocrdma_hdr_type_to_proto_num(int devid, u8 hdr_type)
{
	switch (hdr_type) {
	case OCRDMA_L3_TYPE_IB_GRH:
		return (u16)ETH_P_IBOE;
	case OCRDMA_L3_TYPE_IPV4:
		return (u16)ETH_P_IP;
	case OCRDMA_L3_TYPE_IPV6:
		return (u16)ETH_P_IPV6;
	default:
		pr_err("ocrdma%d: Invalid network header\n", devid);
		return 0;
	}
}

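/* Build the on-wire header template held in the hardware AV: an Ethernet
 * header (VLAN tagged when a VLAN or PFC is in use) followed by either a
 * GRH / IPv6 header or, for RoCE v2 over an IPv4 GID, an IPv4 header.
 */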
static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
			struct rdma_ah_attr *attr, union ib_gid *sgid,
			int pdid, bool *isvlan, u16 vlan_tag)
{
	int status;
	struct ocrdma_eth_vlan eth;
	struct ocrdma_grh grh;
	int eth_sz;
	u16 proto_num = 0;
	u8 nxthdr = 0x11;
	struct iphdr ipv4;
	const struct ib_global_route *ib_grh;
	union {
		struct sockaddr     _sockaddr;
		struct sockaddr_in  _sockaddr_in;
		struct sockaddr_in6 _sockaddr_in6;
	} sgid_addr, dgid_addr;

	memset(&eth, 0, sizeof(eth));
	memset(&grh, 0, sizeof(grh));

	/* Protocol Number */
	proto_num = ocrdma_hdr_type_to_proto_num(dev->id, ah->hdr_type);
	if (!proto_num)
		return -EINVAL;
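	/* Next-protocol value: 0x1b (IB transport) for RoCE, 0x11 (UDP) for
	 * RoCE v2; written into the GRH next-header or IPv4 protocol field.
	 */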
	nxthdr = (proto_num == ETH_P_IBOE) ? 0x1b : 0x11;
	/* VLAN */
	if (!vlan_tag || (vlan_tag > 0xFFF))
		vlan_tag = dev->pvid;
	if (vlan_tag || dev->pfc_state) {
		if (!vlan_tag) {
			pr_err("ocrdma%d: Using VLAN with PFC is recommended\n",
				dev->id);
			pr_err("ocrdma%d: Using VLAN 0 for this connection\n",
				dev->id);
		}
		eth.eth_type = cpu_to_be16(ETH_P_8021Q);
		eth.roce_eth_type = cpu_to_be16(proto_num);
		vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
		eth.vlan_tag = cpu_to_be16(vlan_tag);
		eth_sz = sizeof(struct ocrdma_eth_vlan);
		*isvlan = true;
	} else {
		eth.eth_type = cpu_to_be16(proto_num);
		eth_sz = sizeof(struct ocrdma_eth_basic);
	}
	/* MAC */
	memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
	status = ocrdma_resolve_dmac(dev, attr, &eth.dmac[0]);
	if (status)
		return status;
	ib_grh = rdma_ah_read_grh(attr);
	ah->sgid_index = ib_grh->sgid_index;
	/* Eth HDR */
	memcpy(&ah->av->eth_hdr, &eth, eth_sz);
	if (ah->hdr_type == RDMA_NETWORK_IPV4) {
		/* Version 4, IHL 5, TOS taken from the GRH traffic class */
		*((__be16 *)&ipv4) = htons((4 << 12) | (5 << 8) |
					   ib_grh->traffic_class);
		ipv4.id = cpu_to_be16(pdid);
		ipv4.frag_off = htons(IP_DF);
		ipv4.tot_len = htons(0);
		ipv4.ttl = ib_grh->hop_limit;
		ipv4.protocol = nxthdr;
		rdma_gid2ip(&sgid_addr._sockaddr, sgid);
		ipv4.saddr = sgid_addr._sockaddr_in.sin_addr.s_addr;
		rdma_gid2ip(&dgid_addr._sockaddr, &ib_grh->dgid);
		ipv4.daddr = dgid_addr._sockaddr_in.sin_addr.s_addr;
		memcpy((u8 *)ah->av + eth_sz, &ipv4, sizeof(struct iphdr));
	} else {
		memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
		grh.tclass_flow = cpu_to_be32((6 << 28) |
					      (ib_grh->traffic_class << 24) |
					      ib_grh->flow_label);
		memcpy(&grh.dgid[0], ib_grh->dgid.raw,
		       sizeof(ib_grh->dgid.raw));
		grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
						(nxthdr << 8) |
						ib_grh->hop_limit);
		memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
	}
	if (*isvlan)
		ah->av->valid |= OCRDMA_AV_VLAN_VALID;
	ah->av->valid = cpu_to_le32(ah->av->valid);
	return status;
}

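/* Allocate a hardware address-vector entry and program it from the
 * rdma_ah_attr.  For a user PD, the resulting AH id (along with the header
 * type and VLAN flag where applicable) is also published through the AH
 * table mapped into the ucontext.
 */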
struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
			       struct ib_udata *udata)
{
	u32 *ahid_addr;
	int status;
	struct ocrdma_ah *ah;
	bool isvlan = false;
	u16 vlan_tag = 0xffff;
	struct ib_gid_attr sgid_attr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	const struct ib_global_route *grh;
	union ib_gid sgid;

	if (!(rdma_ah_get_ah_flags(attr) & IB_AH_GRH))
		return ERR_PTR(-EINVAL);

	grh = rdma_ah_read_grh(attr);
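	/* Refresh the service level if it has been flagged as stale
	 * (e.g. after an asynchronous priority/CoS change event).
	 */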
	if (atomic_cmpxchg(&dev->update_sl, 1, 0))
		ocrdma_init_service_level(dev);

	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	status = ocrdma_alloc_av(dev, ah);
	if (status)
		goto av_err;

	status = ib_get_cached_gid(&dev->ibdev, 1, grh->sgid_index, &sgid,
				   &sgid_attr);
	if (status) {
		pr_err("%s(): Failed to query sgid, status = %d\n",
		       __func__, status);
		goto av_conf_err;
	}
	if (sgid_attr.ndev) {
		if (is_vlan_dev(sgid_attr.ndev))
			vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev);
		dev_put(sgid_attr.ndev);
	}
	/* Get network header type for this GID */
	ah->hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);

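	/* For a user PD, resolve the destination MAC and VLAN from the GRH.
	 * Multicast and link-local destinations are skipped here because
	 * their MAC is derived directly from the GID when the AV is built.
	 */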
	if ((pd->uctx) &&
	    (!rdma_is_multicast_addr((struct in6_addr *)grh->dgid.raw)) &&
	    (!rdma_link_local_addr((struct in6_addr *)grh->dgid.raw))) {
		status = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
						      attr->dmac,
						      &vlan_tag,
						      &sgid_attr.ndev->ifindex,
						      NULL);
		if (status) {
			pr_err("%s(): Failed to resolve dmac from gid, status = %d\n",
			       __func__, status);
			goto av_conf_err;
		}
	}

	status = set_av_attr(dev, ah, attr, &sgid, pd->id, &isvlan, vlan_tag);
	if (status)
		goto av_conf_err;

	/* if pd is for the user process, pass the ah_id to user space
	 * through the AH table mapped into the ucontext (the slot is
	 * indexed by the dlid field supplied by user space).
	 */
	if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {
		ahid_addr = pd->uctx->ah_tbl.va + rdma_ah_get_dlid(attr);
		*ahid_addr = 0;
		*ahid_addr |= ah->id & OCRDMA_AH_ID_MASK;
		if (ocrdma_is_udp_encap_supported(dev)) {
			*ahid_addr |= ((u32)ah->hdr_type &
				       OCRDMA_AH_L3_TYPE_MASK) <<
				       OCRDMA_AH_L3_TYPE_SHIFT;
		}
		if (isvlan)
			*ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK <<
				       OCRDMA_AH_VLAN_VALID_SHIFT);
	}

	return &ah->ibah;

av_conf_err:
	ocrdma_free_av(dev, ah);
av_err:
	kfree(ah);
	return ERR_PTR(status);
}

int ocrdma_destroy_ah(struct ib_ah *ibah)
{
	struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device);

	ocrdma_free_av(dev, ah);
	kfree(ah);
	return 0;
}

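/* Reconstruct an rdma_ah_attr from the programmed AV: the SL is recovered
 * from the VLAN PCP bits when a VLAN header is present, the remaining
 * fields come from the GRH that follows the Ethernet header.
 */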
int ocrdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
	struct ocrdma_av *av = ah->av;
	struct ocrdma_grh *grh;

	if (ah->av->valid & OCRDMA_AV_VALID) {
		grh = (struct ocrdma_grh *)((u8 *)ah->av +
				sizeof(struct ocrdma_eth_vlan));
		rdma_ah_set_sl(attr, be16_to_cpu(av->eth_hdr.vlan_tag) >>
				     OCRDMA_VID_PCP_SHIFT);
	} else {
		grh = (struct ocrdma_grh *)((u8 *)ah->av +
					sizeof(struct ocrdma_eth_basic));
		rdma_ah_set_sl(attr, 0);
	}
	rdma_ah_set_grh(attr, NULL,
			be32_to_cpu(grh->tclass_flow) & 0xffffffff,
			ah->sgid_index,
			be32_to_cpu(grh->pdid_hoplimit) & 0xff,
			be32_to_cpu(grh->tclass_flow) >> 24);
	rdma_ah_set_dgid_raw(attr, &grh->dgid[0]);
	return 0;
}

int ocrdma_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	/* modify_ah is unsupported */
	return -ENOSYS;
}

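/* Only the performance management class is handled locally; the PMA
 * counters are filled in from the driver statistics.  All other classes
 * are reported as consumed without generating a reply.
 */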
int ocrdma_process_mad(struct ib_device *ibdev,
		       int process_mad_flags,
		       u8 port_num,
		       const struct ib_wc *in_wc,
		       const struct ib_grh *in_grh,
		       const struct ib_mad_hdr *in, size_t in_mad_size,
		       struct ib_mad_hdr *out, size_t *out_mad_size,
		       u16 *out_mad_pkey_index)
{
	int status;
	struct ocrdma_dev *dev;
	const struct ib_mad *in_mad = (const struct ib_mad *)in;
	struct ib_mad *out_mad = (struct ib_mad *)out;

	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
			 *out_mad_size != sizeof(*out_mad)))
		return IB_MAD_RESULT_FAILURE;

	switch (in_mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_PERF_MGMT:
		dev = get_ocrdma_dev(ibdev);
		if (!ocrdma_pma_counters(dev, out_mad))
			status = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
		else
			status = IB_MAD_RESULT_SUCCESS;
		break;
	default:
		status = IB_MAD_RESULT_SUCCESS;
		break;
	}
	return status;
}