/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>

#include "ipoib.h"

int ipoib_mcast_attach(struct net_device *dev, struct ib_device *hca,
                       union ib_gid *mgid, u16 mlid, int set_qkey, u32 qkey)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ib_qp_attr *qp_attr = NULL;
        int ret;
        u16 pkey_index;

        if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index)) {
                clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
                ret = -ENXIO;
                goto out;
        }
        set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

        if (set_qkey) {
                ret = -ENOMEM;
                qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
                if (!qp_attr)
                        goto out;

                /* set correct QKey for QP */
                qp_attr->qkey = qkey;
                ret = ib_modify_qp(priv->qp, qp_attr, IB_QP_QKEY);
                if (ret) {
                        ipoib_warn(priv, "failed to modify QP, ret = %d\n", ret);
                        goto out;
                }
        }

        /* attach QP to multicast group */
        ret = ib_attach_mcast(priv->qp, mgid, mlid);
        if (ret)
                ipoib_warn(priv, "failed to attach to multicast group, ret = %d\n", ret);

out:
        kfree(qp_attr);
        return ret;
}

int ipoib_mcast_detach(struct net_device *dev, struct ib_device *hca,
                       union ib_gid *mgid, u16 mlid)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        int ret;

        ret = ib_detach_mcast(priv->qp, mgid, mlid);

        return ret;
}

int ipoib_init_qp(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        int ret;
        struct ib_qp_attr qp_attr;
        int attr_mask;

        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
                return -1;

        qp_attr.qp_state = IB_QPS_INIT;
        qp_attr.qkey = 0;
        qp_attr.port_num = priv->port;
        qp_attr.pkey_index = priv->pkey_index;
        attr_mask =
            IB_QP_QKEY |
            IB_QP_PORT |
            IB_QP_PKEY_INDEX |
            IB_QP_STATE;
        ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to init, ret = %d\n", ret);
                goto out_fail;
        }

        qp_attr.qp_state = IB_QPS_RTR;
        /* Can't set this in an INIT->RTR transition */
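        /*
         * The same attr_mask is reused for the INIT->RTR and RTR->RTS
         * transitions below, dropping attributes that are not valid for
         * the respective transition (the port here, the P_Key index for
         * the RTS step) and adding the initial send PSN for RTS.  A UD QP
         * needs no address/path information for these transitions, which
         * keeps the state changes this simple.
         */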
        attr_mask &= ~IB_QP_PORT;
        ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTR, ret = %d\n", ret);
                goto out_fail;
        }

        qp_attr.qp_state = IB_QPS_RTS;
        qp_attr.sq_psn = 0;
        attr_mask |= IB_QP_SQ_PSN;
        attr_mask &= ~IB_QP_PKEY_INDEX;
        ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTS, ret = %d\n", ret);
                goto out_fail;
        }

        return 0;

out_fail:
        qp_attr.qp_state = IB_QPS_RESET;
        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                ipoib_warn(priv, "Failed to modify QP to RESET state\n");

        return ret;
}

int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ib_qp_init_attr init_attr = {
                .cap = {
                        .max_send_wr  = ipoib_sendq_size,
                        .max_recv_wr  = ipoib_recvq_size,
                        .max_send_sge = min_t(u32, priv->ca->attrs.max_send_sge,
                                              MAX_SKB_FRAGS + 1),
                        .max_recv_sge = IPOIB_UD_RX_SG
                },
                .sq_sig_type = IB_SIGNAL_ALL_WR,
                .qp_type     = IB_QPT_UD
        };
        struct ib_cq_init_attr cq_attr = {};

        int ret, size, req_vec;
        int i;

        size = ipoib_recvq_size + 1;
        ret = ipoib_cm_dev_init(dev);
        if (!ret) {
                size += ipoib_sendq_size;
                if (ipoib_cm_has_srq(dev))
                        size += ipoib_recvq_size + 1; /* 1 extra for rx_drain_qp */
                else
                        size += ipoib_recvq_size * ipoib_max_conn_qp;
        } else
                if (ret != -EOPNOTSUPP)
                        return ret;

        req_vec = (priv->port - 1) * 2;

        cq_attr.cqe = size;
        cq_attr.comp_vector = req_vec % priv->ca->num_comp_vectors;
        priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_rx_completion, NULL,
                                     priv, &cq_attr);
        if (IS_ERR(priv->recv_cq)) {
                pr_warn("%s: failed to create receive CQ\n", ca->name);
                goto out_cm_dev_cleanup;
        }

        cq_attr.cqe = ipoib_sendq_size;
        cq_attr.comp_vector = (req_vec + 1) % priv->ca->num_comp_vectors;
        priv->send_cq = ib_create_cq(priv->ca, ipoib_ib_tx_completion, NULL,
                                     priv, &cq_attr);
        if (IS_ERR(priv->send_cq)) {
                pr_warn("%s: failed to create send CQ\n", ca->name);
                goto out_free_recv_cq;
        }

        if (ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP))
                goto out_free_send_cq;

        init_attr.send_cq = priv->send_cq;
        init_attr.recv_cq = priv->recv_cq;

        if (priv->hca_caps & IB_DEVICE_UD_TSO)
                init_attr.create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;

        if (priv->hca_caps & IB_DEVICE_BLOCK_MULTICAST_LOOPBACK)
                init_attr.create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

        if (priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING)
                init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;

        priv->qp = ib_create_qp(priv->pd, &init_attr);
        if (IS_ERR(priv->qp)) {
                pr_warn("%s: failed to create QP\n", ca->name);
                goto out_free_send_cq;
        }

        if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
                goto out_free_send_cq;

        for (i = 0; i < MAX_SKB_FRAGS + 1; ++i)
                priv->tx_sge[i].lkey = priv->pd->local_dma_lkey;

        priv->tx_wr.wr.opcode = IB_WR_SEND;
        priv->tx_wr.wr.sg_list = priv->tx_sge;
        priv->tx_wr.wr.send_flags = IB_SEND_SIGNALED;

        priv->rx_sge[0].lkey = priv->pd->local_dma_lkey;

        priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
        priv->rx_wr.num_sge = 1;

        priv->rx_wr.next = NULL;
        priv->rx_wr.sg_list = priv->rx_sge;
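        /*
         * Advertise scatter/gather to the network stack only if the QP was
         * actually created with more than one send SGE, so that fragmented
         * skbs can be posted directly instead of being linearized first.
         */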
        if (init_attr.cap.max_send_sge > 1)
                dev->features |= NETIF_F_SG;

        priv->max_send_sge = init_attr.cap.max_send_sge;

        return 0;

out_free_send_cq:
        ib_destroy_cq(priv->send_cq);

out_free_recv_cq:
        ib_destroy_cq(priv->recv_cq);

out_cm_dev_cleanup:
        ipoib_cm_dev_cleanup(dev);

        return -ENODEV;
}

void ipoib_transport_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);

        if (priv->qp) {
                if (ib_destroy_qp(priv->qp))
                        ipoib_warn(priv, "ib_qp_destroy failed\n");

                priv->qp = NULL;
        }

        if (ib_destroy_cq(priv->send_cq))
                ipoib_warn(priv, "ib_cq_destroy (send) failed\n");

        if (ib_destroy_cq(priv->recv_cq))
                ipoib_warn(priv, "ib_cq_destroy (recv) failed\n");
}

void ipoib_event(struct ib_event_handler *handler,
                 struct ib_event *record)
{
        struct ipoib_dev_priv *priv =
                container_of(handler, struct ipoib_dev_priv, event_handler);

        if (record->element.port_num != priv->port)
                return;

        ipoib_dbg(priv, "Event %d on device %s port %d\n", record->event,
                  dev_name(&record->device->dev), record->element.port_num);

        if (record->event == IB_EVENT_CLIENT_REREGISTER) {
                queue_work(ipoib_workqueue, &priv->flush_light);
        } else if (record->event == IB_EVENT_PORT_ERR ||
                   record->event == IB_EVENT_PORT_ACTIVE ||
                   record->event == IB_EVENT_LID_CHANGE) {
                queue_work(ipoib_workqueue, &priv->flush_normal);
        } else if (record->event == IB_EVENT_PKEY_CHANGE) {
                queue_work(ipoib_workqueue, &priv->flush_heavy);
        } else if (record->event == IB_EVENT_GID_CHANGE &&
                   !test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
                queue_work(ipoib_workqueue, &priv->flush_light);
        }
}
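/*
 * Usage sketch (the registration itself lives outside this file, typically
 * in ipoib_main.c when the port device is set up):
 *
 *      INIT_IB_EVENT_HANDLER(&priv->event_handler, priv->ca, ipoib_event);
 *      ib_register_event_handler(&priv->event_handler);
 *
 * With the handler registered, client-reregister and (non-user-set) GID
 * change events schedule a light flush, port error/active and LID changes
 * a normal flush, and P_Key changes a heavy flush on the IPoIB workqueue,
 * as dispatched by ipoib_event() above.
 */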