/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
31 */ 32 33 #include <rdma/ib_addr.h> 34 #include <rdma/ib_cache.h> 35 36 #include <linux/slab.h> 37 #include <linux/inet.h> 38 #include <linux/string.h> 39 #include <linux/mlx4/driver.h> 40 41 #include "mlx4_ib.h" 42 43 static struct ib_ah *create_ib_ah(struct ib_pd *pd, 44 struct rdma_ah_attr *ah_attr, 45 struct mlx4_ib_ah *ah) 46 { 47 struct mlx4_dev *dev = to_mdev(pd->device)->dev; 48 49 ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | 50 (rdma_ah_get_port_num(ah_attr) << 24)); 51 ah->av.ib.g_slid = rdma_ah_get_path_bits(ah_attr); 52 ah->av.ib.sl_tclass_flowlabel = 53 cpu_to_be32(rdma_ah_get_sl(ah_attr) << 28); 54 if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) { 55 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr); 56 57 ah->av.ib.g_slid |= 0x80; 58 ah->av.ib.gid_index = grh->sgid_index; 59 ah->av.ib.hop_limit = grh->hop_limit; 60 ah->av.ib.sl_tclass_flowlabel |= 61 cpu_to_be32((grh->traffic_class << 20) | 62 grh->flow_label); 63 memcpy(ah->av.ib.dgid, grh->dgid.raw, 16); 64 } 65 66 ah->av.ib.dlid = cpu_to_be16(rdma_ah_get_dlid(ah_attr)); 67 if (rdma_ah_get_static_rate(ah_attr)) { 68 u8 static_rate = rdma_ah_get_static_rate(ah_attr) + 69 MLX4_STAT_RATE_OFFSET; 70 71 while (static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET && 72 !(1 << static_rate & dev->caps.stat_rate_support)) 73 --static_rate; 74 ah->av.ib.stat_rate = static_rate; 75 } 76 77 return &ah->ibah; 78 } 79 80 static struct ib_ah *create_iboe_ah(struct ib_pd *pd, 81 struct rdma_ah_attr *ah_attr, 82 struct mlx4_ib_ah *ah) 83 { 84 struct mlx4_ib_dev *ibdev = to_mdev(pd->device); 85 struct mlx4_dev *dev = ibdev->dev; 86 int is_mcast = 0; 87 struct in6_addr in6; 88 u16 vlan_tag = 0xffff; 89 union ib_gid sgid; 90 struct ib_gid_attr gid_attr; 91 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr); 92 int ret; 93 94 memcpy(&in6, grh->dgid.raw, sizeof(in6)); 95 if (rdma_is_multicast_addr(&in6)) 96 is_mcast = 1; 97 98 memcpy(ah->av.eth.mac, ah_attr->roce.dmac, ETH_ALEN); 99 ret = 
ib_get_cached_gid(pd->device, rdma_ah_get_port_num(ah_attr), 100 grh->sgid_index, &sgid, &gid_attr); 101 if (ret) 102 return ERR_PTR(ret); 103 eth_zero_addr(ah->av.eth.s_mac); 104 if (gid_attr.ndev) { 105 if (is_vlan_dev(gid_attr.ndev)) 106 vlan_tag = vlan_dev_vlan_id(gid_attr.ndev); 107 memcpy(ah->av.eth.s_mac, gid_attr.ndev->dev_addr, ETH_ALEN); 108 dev_put(gid_attr.ndev); 109 } 110 if (vlan_tag < 0x1000) 111 vlan_tag |= (rdma_ah_get_sl(ah_attr) & 7) << 13; 112 ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | 113 (rdma_ah_get_port_num(ah_attr) << 24)); 114 ret = mlx4_ib_gid_index_to_real_index(ibdev, 115 rdma_ah_get_port_num(ah_attr), 116 grh->sgid_index); 117 if (ret < 0) 118 return ERR_PTR(ret); 119 ah->av.eth.gid_index = ret; 120 ah->av.eth.vlan = cpu_to_be16(vlan_tag); 121 ah->av.eth.hop_limit = grh->hop_limit; 122 if (rdma_ah_get_static_rate(ah_attr)) { 123 ah->av.eth.stat_rate = rdma_ah_get_static_rate(ah_attr) + 124 MLX4_STAT_RATE_OFFSET; 125 while (ah->av.eth.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET && 126 !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support)) 127 --ah->av.eth.stat_rate; 128 } 129 ah->av.eth.sl_tclass_flowlabel |= 130 cpu_to_be32((grh->traffic_class << 20) | 131 grh->flow_label); 132 /* 133 * HW requires multicast LID so we just choose one. 
134 */ 135 if (is_mcast) 136 ah->av.ib.dlid = cpu_to_be16(0xc000); 137 138 memcpy(ah->av.eth.dgid, grh->dgid.raw, 16); 139 ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(rdma_ah_get_sl(ah_attr) 140 << 29); 141 return &ah->ibah; 142 } 143 144 struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, 145 struct ib_udata *udata) 146 147 { 148 struct mlx4_ib_ah *ah; 149 struct ib_ah *ret; 150 151 ah = kzalloc(sizeof *ah, GFP_ATOMIC); 152 if (!ah) 153 return ERR_PTR(-ENOMEM); 154 155 if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) { 156 if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) { 157 ret = ERR_PTR(-EINVAL); 158 } else { 159 /* 160 * TBD: need to handle the case when we get 161 * called in an atomic context and there we 162 * might sleep. We don't expect this 163 * currently since we're working with link 164 * local addresses which we can translate 165 * without going to sleep. 166 */ 167 ret = create_iboe_ah(pd, ah_attr, ah); 168 } 169 170 if (IS_ERR(ret)) 171 kfree(ah); 172 173 return ret; 174 } else 175 return create_ib_ah(pd, ah_attr, ah); /* never fails */ 176 } 177 178 int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) 179 { 180 struct mlx4_ib_ah *ah = to_mah(ibah); 181 int port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24; 182 183 memset(ah_attr, 0, sizeof *ah_attr); 184 ah_attr->type = ibah->type; 185 186 if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) { 187 rdma_ah_set_dlid(ah_attr, 0); 188 rdma_ah_set_sl(ah_attr, 189 be32_to_cpu(ah->av.eth.sl_tclass_flowlabel) 190 >> 29); 191 } else { 192 rdma_ah_set_dlid(ah_attr, be16_to_cpu(ah->av.ib.dlid)); 193 rdma_ah_set_sl(ah_attr, 194 be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) 195 >> 28); 196 } 197 198 rdma_ah_set_port_num(ah_attr, port_num); 199 if (ah->av.ib.stat_rate) 200 rdma_ah_set_static_rate(ah_attr, 201 ah->av.ib.stat_rate - 202 MLX4_STAT_RATE_OFFSET); 203 rdma_ah_set_path_bits(ah_attr, ah->av.ib.g_slid & 0x7F); 204 if (mlx4_ib_ah_grh_present(ah)) { 205 u32 tc_fl = 
be32_to_cpu(ah->av.ib.sl_tclass_flowlabel); 206 207 rdma_ah_set_grh(ah_attr, NULL, 208 tc_fl & 0xfffff, ah->av.ib.gid_index, 209 ah->av.ib.hop_limit, 210 tc_fl >> 20); 211 rdma_ah_set_dgid_raw(ah_attr, ah->av.ib.dgid); 212 } 213 214 return 0; 215 } 216 217 int mlx4_ib_destroy_ah(struct ib_ah *ah) 218 { 219 kfree(to_mah(ah)); 220 return 0; 221 } 222