/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <linux/slab.h>
#include <linux/inet.h>
#include <linux/string.h>
#include <linux/mlx4/driver.h>

#include "mlx4_ib.h"

static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
				  struct mlx4_ib_ah *ah)
{
	struct mlx4_dev *dev = to_mdev(pd->device)->dev;

	ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
	ah->av.ib.g_slid  = ah_attr->src_path_bits;
	/* Set the SL first so the GRH path below can OR in tclass/flow label. */
	ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
	if (ah_attr->ah_flags & IB_AH_GRH) {
		ah->av.ib.g_slid   |= 0x80;
		ah->av.ib.gid_index = ah_attr->grh.sgid_index;
		ah->av.ib.hop_limit = ah_attr->grh.hop_limit;
		ah->av.ib.sl_tclass_flowlabel |=
			cpu_to_be32((ah_attr->grh.traffic_class << 20) |
				    ah_attr->grh.flow_label);
		memcpy(ah->av.ib.dgid, ah_attr->grh.dgid.raw, 16);
	}

	ah->av.ib.dlid = cpu_to_be16(ah_attr->dlid);
	if (ah_attr->static_rate) {
		ah->av.ib.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
		while (ah->av.ib.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
			--ah->av.ib.stat_rate;
	}

	return &ah->ibah;
}

static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
				    struct mlx4_ib_ah *ah)
{
	struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
	struct mlx4_dev *dev = ibdev->dev;
	int is_mcast = 0;
	struct in6_addr in6;
	u16 vlan_tag;

	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
	if (rdma_is_multicast_addr(&in6)) {
		is_mcast = 1;
		rdma_get_mcast_mac(&in6, ah->av.eth.mac);
	} else {
		memcpy(ah->av.eth.mac, ah_attr->dmac, ETH_ALEN);
	}
	/* Fold the SL into the 802.1Q priority bits when a VLAN is in use. */
	vlan_tag = ah_attr->vlan_id;
	if (vlan_tag < 0x1000)
		vlan_tag |= (ah_attr->sl & 7) << 13;
	ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
	ah->av.eth.gid_index = ah_attr->grh.sgid_index;
	ah->av.eth.vlan = cpu_to_be16(vlan_tag);
	if (ah_attr->static_rate) {
		ah->av.eth.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
		while (ah->av.eth.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
			--ah->av.eth.stat_rate;
	}

	/*
	 * HW requires multicast LID so we just choose one.
	 */
	if (is_mcast)
		ah->av.ib.dlid = cpu_to_be16(0xc000);

	memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
	ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29);

	return &ah->ibah;
}

struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct mlx4_ib_ah *ah;
	struct ib_ah *ret;

	ah = kzalloc(sizeof *ah, GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	if (rdma_port_get_link_layer(pd->device, ah_attr->port_num) == IB_LINK_LAYER_ETHERNET) {
		if (!(ah_attr->ah_flags & IB_AH_GRH)) {
			ret = ERR_PTR(-EINVAL);
		} else {
			/*
			 * TBD: need to handle the case when we get
			 * called in an atomic context and there we
			 * might sleep.  We don't expect this
			 * currently since we're working with link
			 * local addresses which we can translate
			 * without going to sleep.
			 */
			ret = create_iboe_ah(pd, ah_attr, ah);
		}

		if (IS_ERR(ret))
			kfree(ah);

		return ret;
	} else
		return create_ib_ah(pd, ah_attr, ah); /* never fails */
}

int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct mlx4_ib_ah *ah = to_mah(ibah);
	enum rdma_link_layer ll;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
	ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24;
	ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num);
	ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? be16_to_cpu(ah->av.ib.dlid) : 0;
	if (ah->av.ib.stat_rate)
		ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET;
	ah_attr->src_path_bits = ah->av.ib.g_slid & 0x7F;

	if (mlx4_ib_ah_grh_present(ah)) {
		ah_attr->ah_flags = IB_AH_GRH;

		ah_attr->grh.traffic_class =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20;
		ah_attr->grh.flow_label =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) & 0xfffff;
		ah_attr->grh.hop_limit  = ah->av.ib.hop_limit;
		ah_attr->grh.sgid_index = ah->av.ib.gid_index;
		memcpy(ah_attr->grh.dgid.raw, ah->av.ib.dgid, 16);
	}

	return 0;
}

int mlx4_ib_destroy_ah(struct ib_ah *ah)
{
	kfree(to_mah(ah));
	return 0;
}
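
/*
 * Illustrative sketch only, not part of the driver and compiled out via
 * #if 0: roughly how a kernel ULP might exercise the verbs that dispatch
 * to the handlers above on an mlx4 device (ib_create_ah() ends up in
 * mlx4_ib_create_ah(), and so on).  The helper name and its inputs
 * (example_ah_round_trip, remote_lid, port) are hypothetical; a real ULP
 * would get the PD, destination LID and SL from its own connection setup.
 */
#if 0
static int example_ah_round_trip(struct ib_pd *pd, u16 remote_lid, u8 port)
{
	struct ib_ah_attr attr = {
		.dlid		= remote_lid,	/* destination LID on an IB port */
		.sl		= 0,		/* service level */
		.src_path_bits	= 0,
		.static_rate	= 0,		/* 0 = no static rate limit */
		.port_num	= port,
	};
	struct ib_ah_attr out;
	struct ib_ah *ah;
	int err;

	ah = ib_create_ah(pd, &attr);		/* dispatches to mlx4_ib_create_ah() */
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	err = ib_query_ah(ah, &out);		/* mlx4_ib_query_ah() decodes the AV */
	if (!err)
		pr_debug("AH dlid 0x%x sl %d port %d\n",
			 out.dlid, out.sl, out.port_num);

	ib_destroy_ah(ah);			/* frees the mlx4_ib_ah */
	return err;
}
#endif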