/*
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: agent.c 1389 2004-12-27 22:56:47Z roland $
 */

#include <linux/dma-mapping.h>

#include <asm/bug.h>

#include <ib_smi.h>

#include "smi.h"
#include "agent_priv.h"
#include "mad_priv.h"
#include "agent.h"

/* Protects ib_agent_port_list; statically initialized so it is valid
 * even before any module init code runs. */
spinlock_t ib_agent_port_list_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(ib_agent_port_list);

/*
 * Caller must hold ib_agent_port_list_lock.
 */
static inline struct ib_agent_port_private *
__ib_get_agent_port(struct ib_device *device, int port_num,
		    struct ib_mad_agent *mad_agent)
{
	struct ib_agent_port_private *entry;

	/* Exactly one of device and mad_agent must be non-NULL */
	BUG_ON(!(!!device ^ !!mad_agent));

	if (device) {
		/* Look up by device and port number */
		list_for_each_entry(entry, &ib_agent_port_list, port_list) {
			if (entry->smp_agent->device == device &&
			    entry->port_num == port_num)
				return entry;
		}
	} else {
		/* Look up by MAD agent (either the SMP or PerfMgmt agent) */
		list_for_each_entry(entry, &ib_agent_port_list, port_list) {
			if (entry->smp_agent == mad_agent ||
			    entry->perf_mgmt_agent == mad_agent)
				return entry;
		}
	}
	return NULL;
}

static inline struct ib_agent_port_private *
ib_get_agent_port(struct ib_device *device, int port_num,
		  struct ib_mad_agent *mad_agent)
{
	struct ib_agent_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
	entry = __ib_get_agent_port(device, port_num, mad_agent);
	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);

	return entry;
}

int smi_check_local_dr_smp(struct ib_smp *smp,
			   struct ib_device *device,
			   int port_num)
{
	struct ib_agent_port_private *port_priv;

	if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		return 1;
	port_priv = ib_get_agent_port(device, port_num, NULL);
	if (!port_priv) {
		printk(KERN_DEBUG SPFX "smi_check_local_dr_smp %s port %d "
		       "not open\n", device->name, port_num);
		return 1;
	}

	return smi_check_local_smp(port_priv->smp_agent, smp);
}
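/*
 * agent_mad_send() below implements "path reversal": the UD address
 * handle and work request for the outgoing response are derived from
 * the work completion (struct ib_wc) of the received request, so the
 * reply retraces the sender's path -- wc->slid becomes the destination
 * LID, wc->src_qp the destination QPN, and for GSI MADs carrying a GRH
 * the received SGID becomes the reply's DGID.
 */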
static int agent_mad_send(struct ib_mad_agent *mad_agent,
			  struct ib_agent_port_private *port_priv,
			  struct ib_mad_private *mad_priv,
			  struct ib_grh *grh,
			  struct ib_wc *wc)
{
	struct ib_agent_send_wr *agent_send_wr;
	struct ib_sge gather_list;
	struct ib_send_wr send_wr;
	struct ib_send_wr *bad_send_wr;
	struct ib_ah_attr ah_attr;
	unsigned long flags;
	int ret = 1;

	agent_send_wr = kmalloc(sizeof(*agent_send_wr), GFP_KERNEL);
	if (!agent_send_wr)
		goto out;
	agent_send_wr->mad = mad_priv;

	/* DMA-map the outgoing MAD */
	gather_list.addr = dma_map_single(mad_agent->device->dma_device,
					  &mad_priv->mad,
					  sizeof(mad_priv->mad),
					  DMA_TO_DEVICE);
	gather_list.length = sizeof(mad_priv->mad);
	gather_list.lkey = port_priv->mr->lkey;

	send_wr.next = NULL;
	send_wr.opcode = IB_WR_SEND;
	send_wr.sg_list = &gather_list;
	send_wr.num_sge = 1;
	send_wr.wr.ud.remote_qpn = wc->src_qp;	/* DQPN */
	send_wr.wr.ud.timeout_ms = 0;
	send_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;

	/* Reverse the path from the receive-side work completion */
	ah_attr.dlid = wc->slid;
	ah_attr.port_num = mad_agent->port_num;
	ah_attr.src_path_bits = wc->dlid_path_bits;
	ah_attr.sl = wc->sl;
	ah_attr.static_rate = 0;
	ah_attr.ah_flags = 0;	/* No GRH */
	if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
		if (wc->wc_flags & IB_WC_GRH) {
			ah_attr.ah_flags = IB_AH_GRH;
			/* Should sgid be looked up ? */
			ah_attr.grh.sgid_index = 0;
			ah_attr.grh.hop_limit = grh->hop_limit;
			ah_attr.grh.flow_label = be32_to_cpup(
				&grh->version_tclass_flow) & 0xfffff;
			ah_attr.grh.traffic_class = (be32_to_cpup(
				&grh->version_tclass_flow) >> 20) & 0xff;
			/* Reply to the sender's GID */
			memcpy(ah_attr.grh.dgid.raw,
			       grh->sgid.raw,
			       sizeof(ah_attr.grh.dgid));
		}
	}

	agent_send_wr->ah = ib_create_ah(mad_agent->qp->pd, &ah_attr);
	if (IS_ERR(agent_send_wr->ah)) {
		printk(KERN_ERR SPFX "Couldn't create address handle\n");
		/* Undo the DMA mapping before bailing out */
		dma_unmap_single(mad_agent->device->dma_device,
				 gather_list.addr,
				 sizeof(mad_priv->mad),
				 DMA_TO_DEVICE);
		kfree(agent_send_wr);
		goto out;
	}

	send_wr.wr.ud.ah = agent_send_wr->ah;
	if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
		send_wr.wr.ud.pkey_index = wc->pkey_index;
		send_wr.wr.ud.remote_qkey = IB_QP1_QKEY;
	} else {	/* for SMPs */
		send_wr.wr.ud.pkey_index = 0;
		send_wr.wr.ud.remote_qkey = 0;
	}
	send_wr.wr.ud.mad_hdr = &mad_priv->mad.mad.mad_hdr;
	/* Stash the tracking structure in wr_id; recovered on completion */
	send_wr.wr_id = (unsigned long)agent_send_wr;

	pci_unmap_addr_set(agent_send_wr, mapping, gather_list.addr);

	/* Send */
	spin_lock_irqsave(&port_priv->send_list_lock, flags);
	if (ib_post_send_mad(mad_agent, &send_wr, &bad_send_wr)) {
		spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
		dma_unmap_single(mad_agent->device->dma_device,
				 pci_unmap_addr(agent_send_wr, mapping),
				 sizeof(mad_priv->mad),
				 DMA_TO_DEVICE);
		ib_destroy_ah(agent_send_wr->ah);
		kfree(agent_send_wr);
	} else {
		list_add_tail(&agent_send_wr->send_list,
			      &port_priv->send_posted_list);
		spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
		ret = 0;
	}

out:
	return ret;
}

int agent_send(struct ib_mad_private *mad,
	       struct ib_grh *grh,
	       struct ib_wc *wc,
	       struct ib_device *device,
	       int port_num)
{
	struct ib_agent_port_private *port_priv;
	struct ib_mad_agent *mad_agent;

	port_priv = ib_get_agent_port(device, port_num, NULL);
	if (!port_priv) {
		printk(KERN_DEBUG SPFX "agent_send %s port %d not open\n",
		       device->name, port_num);
		return 1;
	}

	/* Get MAD agent based on mgmt_class in MAD */
	switch (mad->mad.mad.mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
		mad_agent = port_priv->smp_agent;
		break;
	case IB_MGMT_CLASS_PERF_MGMT:
		mad_agent = port_priv->perf_mgmt_agent;
		break;
	default:
		return 1;	/* No agent for this management class */
	}

	return agent_mad_send(mad_agent, port_priv, mad, grh, wc);
}
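/*
 * Completion path: agent_mad_send() stashes a pointer to its
 * ib_agent_send_wr in send_wr.wr_id.  When the send completes, the MAD
 * layer invokes agent_send_handler() below with that wr_id, which lets
 * the handler unmap the DMA buffer, destroy the address handle, and
 * free the MAD and tracking structure allocated for the send.
 */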
static void agent_send_handler(struct ib_mad_agent *mad_agent,
			       struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_agent_port_private *port_priv;
	struct ib_agent_send_wr *agent_send_wr;
	unsigned long flags;

	/* Find matching MAD agent */
	port_priv = ib_get_agent_port(NULL, 0, mad_agent);
	if (!port_priv) {
		printk(KERN_ERR SPFX "agent_send_handler: no matching MAD "
		       "agent %p\n", mad_agent);
		return;
	}

	/* Recover the tracking structure stashed in wr_id at post time */
	agent_send_wr = (struct ib_agent_send_wr *)(unsigned long)
			mad_send_wc->wr_id;
	spin_lock_irqsave(&port_priv->send_list_lock, flags);
	/* Remove completed send from posted send MAD list */
	list_del(&agent_send_wr->send_list);
	spin_unlock_irqrestore(&port_priv->send_list_lock, flags);

	/* Unmap DMA */
	dma_unmap_single(mad_agent->device->dma_device,
			 pci_unmap_addr(agent_send_wr, mapping),
			 sizeof(agent_send_wr->mad->mad),
			 DMA_TO_DEVICE);

	ib_destroy_ah(agent_send_wr->ah);

	/* Release allocated memory */
	kmem_cache_free(ib_mad_cache, agent_send_wr->mad);
	kfree(agent_send_wr);
}
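/*
 * Port lifecycle: ib_agent_port_open() registers one send-only MAD
 * agent on the SMI QP (QP0) for the subnet management classes and one
 * on the GSI QP (QP1) for the PerfMgmt class, obtains a DMA MR for the
 * send buffers, and links the port onto ib_agent_port_list.
 * ib_agent_port_close() undoes each step in reverse.
 */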
int ib_agent_port_open(struct ib_device *device, int port_num)
{
	int ret;
	struct ib_agent_port_private *port_priv;
	unsigned long flags;

	/* First, check if port already open for SMI */
	port_priv = ib_get_agent_port(device, port_num, NULL);
	if (port_priv) {
		printk(KERN_DEBUG SPFX "%s port %d already open\n",
		       device->name, port_num);
		return 0;
	}

	/* Create new device info */
	port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n");
		ret = -ENOMEM;
		goto error1;
	}

	memset(port_priv, 0, sizeof *port_priv);
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->send_list_lock);
	INIT_LIST_HEAD(&port_priv->send_posted_list);

	/* Obtain send only MAD agent for SM class (SMI QP) */
	port_priv->smp_agent = ib_register_mad_agent(device, port_num,
						     IB_QPT_SMI,
						     NULL, 0,
						     &agent_send_handler,
						     NULL, NULL);
	if (IS_ERR(port_priv->smp_agent)) {
		ret = PTR_ERR(port_priv->smp_agent);
		goto error2;
	}

	/* Obtain send only MAD agent for PerfMgmt class (GSI QP) */
	port_priv->perf_mgmt_agent = ib_register_mad_agent(device, port_num,
							   IB_QPT_GSI,
							   NULL, 0,
							   &agent_send_handler,
							   NULL, NULL);
	if (IS_ERR(port_priv->perf_mgmt_agent)) {
		ret = PTR_ERR(port_priv->perf_mgmt_agent);
		goto error3;
	}

	port_priv->mr = ib_get_dma_mr(port_priv->smp_agent->qp->pd,
				      IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(port_priv->mr)) {
		printk(KERN_ERR SPFX "Couldn't get DMA MR\n");
		ret = PTR_ERR(port_priv->mr);
		goto error4;
	}

	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_agent_port_list);
	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);

	return 0;

	/* Unwind in reverse order of setup */
error4:
	ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
error3:
	ib_unregister_mad_agent(port_priv->smp_agent);
error2:
	kfree(port_priv);
error1:
	return ret;
}

int ib_agent_port_close(struct ib_device *device, int port_num)
{
	struct ib_agent_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
	port_priv = __ib_get_agent_port(device, port_num, NULL);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
		printk(KERN_ERR SPFX "%s port %d not found\n",
		       device->name, port_num);
		return -ENODEV;
	}
	list_del(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);

	/* Tear down in reverse order of ib_agent_port_open() */
	ib_dereg_mr(port_priv->mr);
	ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
	ib_unregister_mad_agent(port_priv->smp_agent);
	kfree(port_priv);

	return 0;
}
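/*
 * Usage sketch (illustrative only; the actual call sites live in the
 * MAD layer, not in this file).  A caller bringing up a device would
 * open an agent port for each physical port and close them again on
 * failure or removal.  start_agent_ports() and its start_port/end_port
 * parameters are hypothetical names for this example:
 *
 *	int start_agent_ports(struct ib_device *device,
 *			      int start_port, int end_port)
 *	{
 *		int port_num, ret;
 *
 *		for (port_num = start_port; port_num <= end_port; port_num++) {
 *			ret = ib_agent_port_open(device, port_num);
 *			if (ret) {
 *				// Unwind the ports already opened
 *				while (--port_num >= start_port)
 *					ib_agent_port_close(device, port_num);
 *				return ret;
 *			}
 *		}
 *		return 0;
 *	}
 */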