/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_mad.c 1349 2004-12-16 21:09:43Z roland $
 */

#include <ib_verbs.h>
#include <ib_mad.h>
#include <ib_smi.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"

enum {
        MTHCA_VENDOR_CLASS1 = 0x9,
        MTHCA_VENDOR_CLASS2 = 0xa
};

struct mthca_trap_mad {
        struct ib_mad *mad;
        DECLARE_PCI_UNMAP_ADDR(mapping)
};

static void update_sm_ah(struct mthca_dev *dev,
                         u8 port_num, u16 lid, u8 sl)
{
        struct ib_ah *new_ah;
        struct ib_ah_attr ah_attr;
        unsigned long flags;

        if (!dev->send_agent[port_num - 1][0])
                return;

        memset(&ah_attr, 0, sizeof ah_attr);
        ah_attr.dlid     = lid;
        ah_attr.sl       = sl;
        ah_attr.port_num = port_num;

        new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
                              &ah_attr);
        if (IS_ERR(new_ah))
                return;

        spin_lock_irqsave(&dev->sm_lock, flags);
        if (dev->sm_ah[port_num - 1])
                ib_destroy_ah(dev->sm_ah[port_num - 1]);
        dev->sm_ah[port_num - 1] = new_ah;
        spin_unlock_irqrestore(&dev->sm_lock, flags);
}

/*
 * Snoop SM MADs for port info and P_Key table sets, so we can
 * synthesize LID change and P_Key change events.
 */
static void smp_snoop(struct ib_device *ibdev,
                      u8 port_num,
                      struct ib_mad *mad)
{
        struct ib_event event;

        if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
             mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
            mad->mad_hdr.method == IB_MGMT_METHOD_SET) {
                if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
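                        /*
                         * SMP attribute data starts at mad->data + 40,
                         * so byte 58 below is the PortInfo MasterSMLID
                         * field and the low nibble of byte 76 is the
                         * MasterSMSL: the LID and SL we need to
                         * address the SM.
                         */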
                        update_sm_ah(to_mdev(ibdev), port_num,
                                     be16_to_cpup((__be16 *) (mad->data + 58)),
                                     (*(u8 *) (mad->data + 76)) & 0xf);

                        event.device           = ibdev;
                        event.event            = IB_EVENT_LID_CHANGE;
                        event.element.port_num = port_num;
                        ib_dispatch_event(&event);
                }

                if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
                        event.device           = ibdev;
                        event.event            = IB_EVENT_PKEY_CHANGE;
                        event.element.port_num = port_num;
                        ib_dispatch_event(&event);
                }
        }
}

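/*
 * Resend a trap raised by the local SMA to the SM, using the address
 * handle cached by update_sm_ah().  Traps of the LID-routed SM class
 * go out on QP0; any other class is sent on QP1 with the GSI Q_Key.
 */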
static void forward_trap(struct mthca_dev *dev,
                         u8 port_num,
                         struct ib_mad *mad)
{
        int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
        struct mthca_trap_mad *tmad;
        struct ib_sge gather_list;
        struct ib_send_wr *bad_wr, wr = {
                .opcode     = IB_WR_SEND,
                .sg_list    = &gather_list,
                .num_sge    = 1,
                .send_flags = IB_SEND_SIGNALED,
                .wr         = {
                        .ud = {
                                .remote_qpn  = qpn,
                                .remote_qkey = qpn ? IB_QP1_QKEY : 0,
                                .timeout_ms  = 0
                        }
                }
        };
        struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
        int ret;
        unsigned long flags;

        if (agent) {
                tmad = kmalloc(sizeof *tmad, GFP_KERNEL);
                if (!tmad)
                        return;

                tmad->mad = kmalloc(sizeof *tmad->mad, GFP_KERNEL);
                if (!tmad->mad) {
                        kfree(tmad);
                        return;
                }

                memcpy(tmad->mad, mad, sizeof *mad);

                wr.wr.ud.mad_hdr = &tmad->mad->mad_hdr;
                wr.wr_id         = (unsigned long) tmad;

                gather_list.addr   = dma_map_single(agent->device->dma_device,
                                                    tmad->mad,
                                                    sizeof *tmad->mad,
                                                    DMA_TO_DEVICE);
                gather_list.length = sizeof *tmad->mad;
                gather_list.lkey   = to_mpd(agent->qp->pd)->ntmr.ibmr.lkey;
                pci_unmap_addr_set(tmad, mapping, gather_list.addr);

                /*
                 * We rely here on the fact that MLX QPs don't use the
                 * address handle after the send is posted (strictly
                 * speaking this violates the IB spec, but we know it's
                 * OK for our devices).
                 */
                spin_lock_irqsave(&dev->sm_lock, flags);
                wr.wr.ud.ah = dev->sm_ah[port_num - 1];
                if (wr.wr.ud.ah)
                        ret = ib_post_send_mad(agent, &wr, &bad_wr);
                else
                        ret = -EINVAL;
                spin_unlock_irqrestore(&dev->sm_lock, flags);

                if (ret) {
                        dma_unmap_single(agent->device->dma_device,
                                         pci_unmap_addr(tmad, mapping),
                                         sizeof *tmad->mad,
                                         DMA_TO_DEVICE);
                        kfree(tmad->mad);
                        kfree(tmad);
                }
        }
}

int mthca_process_mad(struct ib_device *ibdev,
                      int mad_flags,
                      u8 port_num,
                      struct ib_wc *in_wc,
                      struct ib_grh *in_grh,
                      struct ib_mad *in_mad,
                      struct ib_mad *out_mad)
{
        int err;
        u8 status;
        u16 slid = in_wc ? in_wc->slid : IB_LID_PERMISSIVE;

        /* Forward locally generated traps to the SM */
        if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
            slid == 0) {
                forward_trap(to_mdev(ibdev), port_num, in_mad);
                return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
        }

        /*
         * Only handle SM gets, sets and trap represses for the SM
         * class.
         *
         * Only handle PMA and Mellanox vendor-specific class gets and
         * sets for other classes.
         */
        if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
            in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
                    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
                    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
                        return IB_MAD_RESULT_SUCCESS;

                /*
                 * Don't process SMInfo queries or vendor-specific
                 * MADs -- the SMA can't handle them.
                 */
                if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
                    ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
                     IB_SMP_ATTR_VENDOR_MASK))
                        return IB_MAD_RESULT_SUCCESS;
        } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
                   in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1   ||
                   in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {
                if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
                    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
                        return IB_MAD_RESULT_SUCCESS;
        } else
                return IB_MAD_RESULT_SUCCESS;

        err = mthca_MAD_IFC(to_mdev(ibdev),
                            mad_flags & IB_MAD_IGNORE_MKEY,
                            mad_flags & IB_MAD_IGNORE_BKEY,
                            port_num, in_wc, in_grh, in_mad, out_mad,
                            &status);
        if (err) {
                mthca_err(to_mdev(ibdev), "MAD_IFC failed\n");
                return IB_MAD_RESULT_FAILURE;
        }
        if (status == MTHCA_CMD_STAT_BAD_PKT)
                return IB_MAD_RESULT_SUCCESS;
        if (status) {
                mthca_err(to_mdev(ibdev), "MAD_IFC returned status %02x\n",
                          status);
                return IB_MAD_RESULT_FAILURE;
        }

        if (!out_mad->mad_hdr.status)
                smp_snoop(ibdev, port_num, in_mad);

        /* set return bit in status of directed route responses */
        if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

        if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
                /* no response for trap repress */
                return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

/* Send completion handler: unmap and free a forwarded trap MAD. */
static void send_handler(struct ib_mad_agent *agent,
                         struct ib_mad_send_wc *mad_send_wc)
{
        struct mthca_trap_mad *tmad =
                (void *) (unsigned long) mad_send_wc->wr_id;

        dma_unmap_single(agent->device->dma_device,
                         pci_unmap_addr(tmad, mapping),
                         sizeof *tmad->mad,
                         DMA_TO_DEVICE);
        kfree(tmad->mad);
        kfree(tmad);
}

/* Register a send-only MAD agent on QP0 and QP1 of each port. */
int mthca_create_agents(struct mthca_dev *dev)
{
        struct ib_mad_agent *agent;
        int p, q;

        spin_lock_init(&dev->sm_lock);

        for (p = 0; p < dev->limits.num_ports; ++p)
                for (q = 0; q <= 1; ++q) {
                        agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
                                                      q ? IB_QPT_GSI : IB_QPT_SMI,
                                                      NULL, 0, send_handler,
                                                      NULL, NULL);
                        if (IS_ERR(agent))
                                goto err;
                        dev->send_agent[p][q] = agent;
                }

        return 0;

err:
        for (p = 0; p < dev->limits.num_ports; ++p)
                for (q = 0; q <= 1; ++q)
                        if (dev->send_agent[p][q])
                                ib_unregister_mad_agent(dev->send_agent[p][q]);

        return PTR_ERR(agent);
}

void mthca_free_agents(struct mthca_dev *dev)
{
        struct ib_mad_agent *agent;
        int p, q;

        for (p = 0; p < dev->limits.num_ports; ++p) {
                for (q = 0; q <= 1; ++q) {
                        agent = dev->send_agent[p][q];
                        dev->send_agent[p][q] = NULL;
                        ib_unregister_mad_agent(agent);
                }

                if (dev->sm_ah[p])
                        ib_destroy_ah(dev->sm_ah[p]);
        }
}