1 /* 2 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 
 */

#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <linux/slab.h>
#include <linux/inet.h>
#include <linux/string.h>
#include <linux/mlx4/driver.h>

#include "mlx4_ib.h"

/*
 * Fill in the hardware address vector (ah->av.ib) for a plain InfiniBand
 * link-layer address handle from @ah_attr.
 *
 * Multi-byte AV fields are stored big-endian for the device.
 * Never fails; always returns &ah->ibah.
 */
static struct ib_ah *create_ib_ah(struct ib_pd *pd,
				  struct rdma_ah_attr *ah_attr,
				  struct mlx4_ib_ah *ah)
{
	struct mlx4_dev *dev = to_mdev(pd->device)->dev;

	/* Port number occupies the top byte of port_pd, the PDN the rest. */
	ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn |
			    (rdma_ah_get_port_num(ah_attr) << 24));
	ah->av.ib.g_slid = rdma_ah_get_path_bits(ah_attr);
	/* SL goes in the top nibble (bits 31:28) of sl_tclass_flowlabel. */
	ah->av.ib.sl_tclass_flowlabel =
			cpu_to_be32(rdma_ah_get_sl(ah_attr) << 28);
	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);

		/*
		 * NOTE(review): bit 7 of g_slid presumably flags "GRH
		 * present" to the HW (mlx4_ib_query_ah() masks it back
		 * out of the path bits) -- confirm against the mlx4 PRM.
		 */
		ah->av.ib.g_slid |= 0x80;
		ah->av.ib.gid_index = grh->sgid_index;
		ah->av.ib.hop_limit = grh->hop_limit;
		/* traffic class in bits 27:20, flow label in bits 19:0 */
		ah->av.ib.sl_tclass_flowlabel |=
			cpu_to_be32((grh->traffic_class << 20) |
				    grh->flow_label);
		memcpy(ah->av.ib.dgid, grh->dgid.raw, 16);
	}

	ah->av.ib.dlid = cpu_to_be16(rdma_ah_get_dlid(ah_attr));
	if (rdma_ah_get_static_rate(ah_attr)) {
		u8 static_rate = rdma_ah_get_static_rate(ah_attr) +
					MLX4_STAT_RATE_OFFSET;

		/*
		 * Step down to the nearest rate the device supports
		 * (caps.stat_rate_support is a bitmap indexed by the
		 * offset HW rate value), but never below 2.5 Gbps.
		 */
		while (static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << static_rate & dev->caps.stat_rate_support))
			--static_rate;
		ah->av.ib.stat_rate = static_rate;
	}

	return &ah->ibah;
}

/*
 * Fill in the hardware address vector (ah->av.eth) for a RoCE (Ethernet
 * link-layer) address handle.  The caller has already verified that a
 * GRH is present, since RoCE addressing is GID-based.
 *
 * Returns &ah->ibah on success or an ERR_PTR on failure (the caller is
 * responsible for freeing @ah in that case).
 */
static struct ib_ah *create_iboe_ah(struct ib_pd *pd,
				    struct rdma_ah_attr *ah_attr,
				    struct mlx4_ib_ah *ah)
{
	struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
	struct mlx4_dev *dev = ibdev->dev;
	int is_mcast = 0;
	struct in6_addr in6;
	u16 vlan_tag = 0xffff;	/* 0xffff means "no VLAN" */
	union ib_gid sgid;
	struct ib_gid_attr gid_attr;
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	int ret;

	/*
	 * Destination MAC: derived from the GID for multicast, otherwise
	 * taken from the dmac carried in @ah_attr.
	 */
	memcpy(&in6, grh->dgid.raw, sizeof(in6));
	if (rdma_is_multicast_addr(&in6)) {
		is_mcast = 1;
		rdma_get_mcast_mac(&in6, ah->av.eth.mac);
	} else {
		memcpy(ah->av.eth.mac, ah_attr->roce.dmac, ETH_ALEN);
	}
	ret = ib_get_cached_gid(pd->device, rdma_ah_get_port_num(ah_attr),
				grh->sgid_index, &sgid, &gid_attr);
	if (ret)
		return ERR_PTR(ret);
	eth_zero_addr(ah->av.eth.s_mac);
	/* Source MAC and VLAN id come from the netdev backing the SGID. */
	if (gid_attr.ndev) {
		if (is_vlan_dev(gid_attr.ndev))
			vlan_tag = vlan_dev_vlan_id(gid_attr.ndev);
		memcpy(ah->av.eth.s_mac, gid_attr.ndev->dev_addr, ETH_ALEN);
		dev_put(gid_attr.ndev);	/* drop the ref taken by the cache lookup */
	}
	if (vlan_tag < 0x1000)
		/* Fold the SL into the 802.1p PCP bits of the VLAN tag. */
		vlan_tag |= (rdma_ah_get_sl(ah_attr) & 7) << 13;
	ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn |
					 (rdma_ah_get_port_num(ah_attr) << 24));
	/* Translate the core's GID table index to the device's real index. */
	ret = mlx4_ib_gid_index_to_real_index(ibdev,
					      rdma_ah_get_port_num(ah_attr),
					      grh->sgid_index);
	if (ret < 0)
		return ERR_PTR(ret);
	ah->av.eth.gid_index = ret;
	ah->av.eth.vlan = cpu_to_be16(vlan_tag);
	ah->av.eth.hop_limit = grh->hop_limit;
	if (rdma_ah_get_static_rate(ah_attr)) {
		ah->av.eth.stat_rate = rdma_ah_get_static_rate(ah_attr) +
					MLX4_STAT_RATE_OFFSET;
		/* Same step-down to a supported rate as in create_ib_ah(). */
		while (ah->av.eth.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
			--ah->av.eth.stat_rate;
	}
	/* traffic class in bits 27:20, flow label in bits 19:0 */
	ah->av.eth.sl_tclass_flowlabel |=
			cpu_to_be32((grh->traffic_class << 20) |
				    grh->flow_label);
	/*
	 * HW requires multicast LID so we just choose one.
	 */
	if (is_mcast)
		/*
		 * NOTE(review): this writes through the av.ib view of the
		 * union; presumably av.ib.dlid overlays the corresponding
		 * av.eth field -- confirm the union layout in mlx4_ib.h.
		 */
		ah->av.ib.dlid = cpu_to_be16(0xc000);

	memcpy(ah->av.eth.dgid, grh->dgid.raw, 16);
	/* For RoCE the SL sits in bits 31:29, not 31:28 as on IB. */
	ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(rdma_ah_get_sl(ah_attr)
						      << 29);
	return &ah->ibah;
}

/*
 * Allocate and initialize an address handle, dispatching to the RoCE or
 * IB fill routine based on the attribute type.  The allocation uses
 * GFP_ATOMIC; per the TBD note below, this path may run in atomic
 * context.  @udata is unused here.
 *
 * Returns the new ib_ah, or an ERR_PTR (-ENOMEM on allocation failure,
 * -EINVAL for RoCE without a GRH, or the RoCE fill routine's error).
 */
struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
				struct ib_udata *udata)

{
	struct mlx4_ib_ah *ah;
	struct ib_ah *ret;

	ah = kzalloc(sizeof *ah, GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
		/* RoCE addressing is GID-based, so a GRH is mandatory. */
		if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
			ret = ERR_PTR(-EINVAL);
		} else {
			/*
			 * TBD: need to handle the case when we get
			 * called in an atomic context and there we
			 * might sleep.  We don't expect this
			 * currently since we're working with link
			 * local addresses which we can translate
			 * without going to sleep.
			 */
			ret = create_iboe_ah(pd, ah_attr, ah);
		}

		if (IS_ERR(ret))
			kfree(ah);

		return ret;
	} else
		return create_ib_ah(pd, ah_attr, ah); /* never fails */
}

/*
 * Decode the packed hardware AV back into @ah_attr -- the inverse of
 * create_ib_ah()/create_iboe_ah() for the fields the AV retains.
 * Always returns 0.
 */
int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
{
	struct mlx4_ib_ah *ah = to_mah(ibah);
	/* Port number was packed into the top byte of port_pd. */
	int port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->type = ibah->type;

	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
		/* RoCE has no LID; the SL was stored in bits 31:29. */
		rdma_ah_set_dlid(ah_attr, 0);
		rdma_ah_set_sl(ah_attr,
			       be32_to_cpu(ah->av.eth.sl_tclass_flowlabel)
			       >> 29);
	} else {
		rdma_ah_set_dlid(ah_attr, be16_to_cpu(ah->av.ib.dlid));
		/* On IB the SL was stored in bits 31:28. */
		rdma_ah_set_sl(ah_attr,
			       be32_to_cpu(ah->av.ib.sl_tclass_flowlabel)
			       >> 28);
	}

	rdma_ah_set_port_num(ah_attr, port_num);
	if (ah->av.ib.stat_rate)
		rdma_ah_set_static_rate(ah_attr,
					ah->av.ib.stat_rate -
					MLX4_STAT_RATE_OFFSET);
	/* Mask off bit 7, which create_ib_ah() sets when a GRH is present. */
	rdma_ah_set_path_bits(ah_attr, ah->av.ib.g_slid & 0x7F);
	if (mlx4_ib_ah_grh_present(ah)) {
		u32 tc_fl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel);

		/* flow label is bits 19:0; traffic class is bits 27:20. */
		rdma_ah_set_grh(ah_attr, NULL,
				tc_fl & 0xfffff, ah->av.ib.gid_index,
				ah->av.ib.hop_limit,
				tc_fl >> 20);
		rdma_ah_set_dgid_raw(ah_attr, ah->av.ib.dgid);
	}

	return 0;
}

/* Free an address handle allocated by mlx4_ib_create_ah(). */
int mlx4_ib_destroy_ah(struct ib_ah *ah)
{
	kfree(to_mah(ah));
	return 0;
}