/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014,2018 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/xarray.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "core_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ib_mad.h>

#ifdef CONFIG_TRACEPOINTS
static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
				 struct ib_mad_qp_info *qp_info,
				 struct trace_event_raw_ib_mad_send_template *entry)
{
	u16 pkey;
	struct ib_device *dev = qp_info->port_priv->device;
	u8 pnum = qp_info->port_priv->port_num;
	struct ib_ud_wr *wr = &mad_send_wr->send_wr;
	struct rdma_ah_attr attr = {};

	rdma_query_ah(wr->ah, &attr);

	/* These are common */
	entry->sl = attr.sl;
	ib_query_pkey(dev, pnum, wr->pkey_index, &pkey);
	entry->pkey = pkey;
	entry->rqpn = wr->remote_qpn;
	entry->rqkey = wr->remote_qkey;
	entry->dlid = rdma_ah_get_dlid(&attr);
}
#endif

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

/* Client ID 0 is used for snoop-only clients */
static DEFINE_XARRAY_ALLOC1(ib_mad_clients);
static u32 ib_mad_client_next;
static struct list_head ib_mad_port_list;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc);
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);

/*
 * Returns a ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is being held
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return a ib_mad_port_private structure or NULL
 * for a device/port
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type)
	{
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);

/*
 * ib_register_mad_agent - Register to send/receive MADs
 *
 * Context: Process context.
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	u8 mgmt_class, vclass;

	if ((qp_type == IB_QPT_SMI && !rdma_cap_ib_smi(device, port_num)) ||
	    (qp_type == IB_QPT_GSI && !rdma_cap_ib_cm(device, port_num)))
		return ERR_PTR(-EPROTONOSUPPORT);

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_dbg_ratelimited(&device->dev, "%s: invalid QP Type %d\n",
				    __func__, qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_dbg_ratelimited(&device->dev,
				    "%s: invalid RMPP Version %u\n",
				    __func__, rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_dbg_ratelimited(&device->dev,
					    "%s: invalid Class Version %u\n",
					    __func__,
					    mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_dbg_ratelimited(&device->dev,
					    "%s: no recv_handler\n", __func__);
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_dbg_ratelimited(&device->dev,
					"%s: Invalid Mgmt Class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_dbg_ratelimited(&device->dev,
					    "%s: Invalid Mgmt Class 0\n",
					    __func__);
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_dbg_ratelimited(&device->dev,
					"%s: No OUI specified for class 0x%x\n",
					__func__,
					mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_dbg_ratelimited(&device->dev,
					"%s: RMPP version for non-RMPP class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		}

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_dbg_ratelimited(&device->dev,
					"%s: Invalid SM QP type: class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_dbg_ratelimited(&device->dev,
					"%s: Invalid GS QP type: class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_dbg_ratelimited(&device->dev, "%s: Invalid port %d\n",
				    __func__, port_num);
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported. For example, Ethernet devices
	 * will not have QP0.
	 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_dbg_ratelimited(&device->dev, "%s: QP %d not supported\n",
				    __func__, qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
	if (ret2) {
		ret = ERR_PTR(ret2);
		goto error4;
	}

	/*
	 * The mlx4 driver uses the top byte to distinguish which virtual
	 * function generated the MAD, so we must avoid using it.
	 */
	ret2 = xa_alloc_cyclic(&ib_mad_clients, &mad_agent_priv->agent.hi_tid,
			       mad_agent_priv, XA_LIMIT(0, (1 << 24) - 1),
			       &ib_mad_client_next, GFP_KERNEL);
	if (ret2 < 0) {
		ret = ERR_PTR(ret2);
		goto error5;
	}

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	spin_lock_irq(&port_priv->reg_lock);
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							  mad_reg_req))
						goto error6;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error6;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error6;
		}
	}
	spin_unlock_irq(&port_priv->reg_lock);

	trace_ib_mad_create_agent(mad_agent_priv);
	return &mad_agent_priv->agent;
error6:
	spin_unlock_irq(&port_priv->reg_lock);
	xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
error5:
	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
error4:
	kfree(reg_req);
error3:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);

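/*
 * Usage sketch (illustrative only, not part of the original file): a client
 * that wants to send and receive Performance Management MADs on the GSI QP
 * would typically fill in an ib_mad_reg_req and register roughly as below.
 * The handler names and context are hypothetical placeholders and error
 * handling is abbreviated.
 *
 *	struct ib_mad_reg_req req = {};
 *	struct ib_mad_agent *agent;
 *
 *	req.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
 *	req.mgmt_class_version = 1;
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	set_bit(IB_MGMT_METHOD_SET, req.method_mask);
 *
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req,
 *				      0,  (no RMPP)
 *				      my_send_handler, my_recv_handler,
 *				      my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 *	...
 *	ib_unregister_mad_agent(agent);
 */
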
static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		   IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;
	int err;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);

	err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type);
	if (err) {
		ret = ERR_PTR(err);
		goto error2;
	}

	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error3;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;
error3:
	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;

	/* Note that we could still be handling received MADs */
	trace_ib_mad_unregister_agent(mad_agent_priv);

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irq(&port_priv->reg_lock);
	remove_mad_reg_req(mad_agent_priv);
	spin_unlock_irq(&port_priv->reg_lock);
	xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);

	kfree(mad_agent_priv->reg_req);
	kfree_rcu(mad_agent_priv, rcu);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 *
 * Context: Process context.
 */
void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
		u16 pkey_index, u8 port_num, struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_cqe = cqe;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

static size_t mad_priv_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
	size_t size = sizeof(struct ib_mad_private) + mad_size;
	struct ib_mad_private *ret = kzalloc(size, flags);

	if (ret)
		ret->mad_size = mad_size;

	return ret;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_grh) + mp->mad_size;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct opa_smp *opa_smp = (struct opa_smp *)smp;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
	u16 out_mad_pkey_index = 0;
	u16 drslid;
	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
				    mad_agent_priv->qp_info->port_priv->port_num);

	if (rdma_cap_ib_switch(device) &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
	 */
	if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
		u32 opa_drslid;

		trace_ib_mad_handle_out_opa_smi(opa_smp);

		if ((opa_get_smp_direction(opa_smp)
		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
		     OPA_LID_PERMISSIVE &&
		     opa_smi_handle_dr_smp_send(opa_smp,
						rdma_cap_ib_switch(device),
						port_num) == IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid directed route\n");
			goto out;
		}
		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
		    opa_drslid & 0xffff0000) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
				opa_drslid);
			goto out;
		}
		drslid = (u16)(opa_drslid & 0x0000ffff);

		/* Check to post send on QP or process locally */
		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
			goto out;
	} else {
		trace_ib_mad_handle_out_ib_smi(smp);

		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
		     IB_LID_PERMISSIVE &&
		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
		     IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "Invalid directed route\n");
			goto out;
		}
		drslid = be16_to_cpu(smp->dr_slid);

		/* Check to post send on QP or process locally */
		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
			goto out;
	}

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr.wr_cqe, drslid,
		     send_wr->pkey_index,
		     send_wr->port_num, &mad_wc);

	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
					+ mad_send_wr->send_buf.data_len
					+ sizeof(struct ib_grh);
	}

	/* No GRH for DR SMP */
	ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL,
				      (const struct ib_mad_hdr *)smp, mad_size,
				      (struct ib_mad_hdr *)mad_priv->mad,
				      &mad_size, &out_mad_pkey_index);
	switch (ret)
	{
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
			recv_mad_agent = find_mad_agent(port_priv,
						(const struct ib_mad_hdr *)mad_priv->mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kfree(mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kfree(mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	if (opa) {
		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
		local->return_wc_byte_len = mad_size;
	}
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
	int seg_size, pad;

	seg_size = mad_size - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}

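/*
 * Worked example (illustrative, assuming the usual IB header sizes): for an
 * RMPP SA MAD, mad_size is sizeof(struct ib_mad) (256) and hdr_len is
 * IB_MGMT_SA_HDR (56), so each segment carries seg_size = 200 data bytes.
 * A 300 byte payload then gets pad = 200 - (300 % 200) = 100, filling the
 * second and final segment exactly.
 */
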
static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				size_t mad_size, gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = mad_size - send_buf->hdr_len;
	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}

int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);

struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
					   u32 remote_qpn, u16 pkey_index,
					   int rmpp_active,
					   int hdr_len, int data_len,
					   gfp_t gfp_mask,
					   u8 base_version)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;
	size_t mad_size;
	bool opa;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);

	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);

	if (opa && base_version == OPA_MGMT_BASE_VERSION)
		mad_size = sizeof(struct opa_mad);
	else
		mad_size = sizeof(struct ib_mad);

	pad = get_pad_size(hdr_len, data_len, mad_size);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > mad_size)
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > mad_size)
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : mad_size;
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;

	/* OPA MADs don't have to be the full 2048 bytes */
	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
	    data_len < mad_size - hdr_len)
		mad_send_wr->sg_list[1].length = data_len;
	else
		mad_send_wr->sg_list[1].length = mad_size - hdr_len;

	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;

	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;

	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.wr.num_sge = 2;
	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);

int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		trace_ib_mad_ib_send_mad(mad_send_wr, qp_info);
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
				   NULL);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {
		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		ret = ib_mad_enforce_security(mad_agent_priv,
					      mad_send_wr->send_wr.pkey_index);
		if (ret)
			goto error;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);

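/*
 * Usage sketch (illustrative only, not part of the original file): once an
 * agent is registered and an address handle "ah" exists for the target, a
 * single non-RMPP MAD is typically built and posted roughly as follows; the
 * names are placeholders and error handling is abbreviated.
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index,
 *				 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 *				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *
 *	(fill in the MAD header and payload through msg->mad)
 *	msg->ah = ah;
 *	msg->timeout_ms = 100;
 *	msg->retries = 3;
 *
 *	if (ib_post_send_mad(msg, NULL))
 *		ib_free_send_mad(msg);
 *
 * On success the buffer is released later, typically from the agent's
 * send_handler, with ib_free_send_mad() once the send completes.
 */
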
/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
					&free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kfree(priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);

struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	dev_err(&mad_agent->device->dev,
		"ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);

static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
		if ((*method)->agent[i]) {
			pr_err("Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	return (*method) ? 0 : (-ENOMEM);
}

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   const char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;

	return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;

	return 0;
}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}

static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			ret = -ENOMEM;
			goto error1;
		}

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}

static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor)
			goto error1;

		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class)
			goto error2;

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			if (!*method)
				goto error3;
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available ? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			/* Allocate method table for this OUI */
			if (!*method) {
				ret = allocate_method_table(method);
				if (ret)
					goto error3;
			}
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
	goto error3;

check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}

static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
	if (!agent_priv->reg_req) {
		goto out;
	}

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left ? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left ? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}

static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       const struct ib_mad_hdr *mad_hdr)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	if (ib_response_mad(mad_hdr)) {
		u32 hi_tid;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD.
		 */
		hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
		rcu_read_lock();
		mad_agent = xa_load(&ib_mad_clients, hi_tid);
		if (mad_agent && !atomic_inc_not_zero(&mad_agent->refcount))
			mad_agent = NULL;
		rcu_read_unlock();
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		const struct ib_vendor_mad *vendor_mad;
		int index;

		spin_lock_irqsave(&port_priv->reg_lock, flags);
		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad_hdr->class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad_hdr->mgmt_class)) {
			class = port_priv->version[
					mad_hdr->class_version].class;
			if (!class)
				goto out;
			if (convert_mgmt_class(mad_hdr->mgmt_class) >=
			    ARRAY_SIZE(class->method_table))
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad_hdr->mgmt_class)];
			if (method)
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad_hdr->class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad_hdr->mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method) {
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
			}
		}
		if (mad_agent)
			atomic_inc(&mad_agent->refcount);
out:
		spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	}

	if (mad_agent && !mad_agent->agent.recv_handler) {
		dev_notice(&port_priv->device->dev,
			   "No receive handler for client %p on port %d\n",
			   &mad_agent->agent, port_priv->port_num);
		deref_mad_agent(mad_agent);
		mad_agent = NULL;
	}

	return mad_agent;
}

static int validate_mad(const struct ib_mad_hdr *mad_hdr,
			const struct ib_mad_qp_info *qp_info,
			bool opa)
{
	int valid = 0;
	u32 qp_num = qp_info->qp->qp_num;

	/* Make sure MAD base version is understood */
	if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
	    (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
		pr_err("MAD received with unsupported base version %d %s\n",
		       mad_hdr->base_version, opa ? "(opa)" : "");
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* CM attributes other than ClassPortInfo only use Send method */
		if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
		    (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
		    (mad_hdr->method != IB_MGMT_METHOD_SEND))
			goto out;
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}

static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
			    const struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}

static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
				     const struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}

static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
				   const struct ib_mad_send_wr_private *wr,
				   const struct ib_mad_recv_wc *rwc)
{
	struct rdma_ah_attr attr;
	u8 send_resp, rcv_resp;
	union ib_gid sgid;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	u8 lmc;
	bool has_grh;

	send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
	rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);

	if (send_resp == rcv_resp)
		/* both requests, or both responses. GIDs different */
		return 0;

	if (rdma_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */
		return 0;

	has_grh = !!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH);
	if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has GID, other does not. Assume different */
		return 0;

	if (!send_resp && rcv_resp) {
		/* is request/response. */
		if (!has_grh) {
			if (ib_get_cached_lmc(device, port_num, &lmc))
				return 0;
			return (!lmc || !((rdma_ah_get_path_bits(&attr) ^
					   rwc->wc->dlid_path_bits) &
					  ((1 << lmc) - 1)));
		} else {
			const struct ib_global_route *grh =
					rdma_ah_read_grh(&attr);

			if (rdma_query_gid(device, port_num,
					   grh->sgid_index, &sgid))
				return 0;
			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
				       16);
		}
	}

	if (!has_grh)
		return rdma_ah_get_dlid(&attr) == rwc->wc->slid;
	else
		return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw,
			       rwc->recv_buf.grh->sgid.raw,
			       16);
}

static inline int is_direct(u8 class)
{
	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
}

struct ib_mad_send_wr_private*
ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
		 const struct ib_mad_recv_wc *wc)
{
	struct ib_mad_send_wr_private *wr;
	const struct ib_mad_hdr *mad_hdr;

	mad_hdr = &wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
		if ((wr->tid == mad_hdr->tid) &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
		    wr->tid == mad_hdr->tid &&
		    wr->timeout &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			/* Verify request has not been canceled */
			return (wr->status == IB_WC_SUCCESS) ?
wr : NULL; 2007 } 2008 return NULL; 2009 } 2010 2011 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr) 2012 { 2013 mad_send_wr->timeout = 0; 2014 if (mad_send_wr->refcount == 1) 2015 list_move_tail(&mad_send_wr->agent_list, 2016 &mad_send_wr->mad_agent_priv->done_list); 2017 } 2018 2019 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, 2020 struct ib_mad_recv_wc *mad_recv_wc) 2021 { 2022 struct ib_mad_send_wr_private *mad_send_wr; 2023 struct ib_mad_send_wc mad_send_wc; 2024 unsigned long flags; 2025 int ret; 2026 2027 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); 2028 ret = ib_mad_enforce_security(mad_agent_priv, 2029 mad_recv_wc->wc->pkey_index); 2030 if (ret) { 2031 ib_free_recv_mad(mad_recv_wc); 2032 deref_mad_agent(mad_agent_priv); 2033 return; 2034 } 2035 2036 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); 2037 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { 2038 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, 2039 mad_recv_wc); 2040 if (!mad_recv_wc) { 2041 deref_mad_agent(mad_agent_priv); 2042 return; 2043 } 2044 } 2045 2046 /* Complete corresponding request */ 2047 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) { 2048 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2049 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); 2050 if (!mad_send_wr) { 2051 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2052 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) 2053 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class) 2054 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr) 2055 & IB_MGMT_RMPP_FLAG_ACTIVE)) { 2056 /* user rmpp is in effect 2057 * and this is an active RMPP MAD 2058 */ 2059 mad_agent_priv->agent.recv_handler( 2060 &mad_agent_priv->agent, NULL, 2061 mad_recv_wc); 2062 atomic_dec(&mad_agent_priv->refcount); 2063 } else { 2064 /* not user rmpp, revert to normal behavior and 2065 * drop the mad */ 2066 ib_free_recv_mad(mad_recv_wc); 2067 deref_mad_agent(mad_agent_priv); 2068 return; 2069 } 2070 } else { 2071 ib_mark_mad_done(mad_send_wr); 2072 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2073 2074 /* Defined behavior is to complete response before request */ 2075 mad_agent_priv->agent.recv_handler( 2076 &mad_agent_priv->agent, 2077 &mad_send_wr->send_buf, 2078 mad_recv_wc); 2079 atomic_dec(&mad_agent_priv->refcount); 2080 2081 mad_send_wc.status = IB_WC_SUCCESS; 2082 mad_send_wc.vendor_err = 0; 2083 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2084 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); 2085 } 2086 } else { 2087 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL, 2088 mad_recv_wc); 2089 deref_mad_agent(mad_agent_priv); 2090 } 2091 2092 return; 2093 } 2094 2095 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv, 2096 const struct ib_mad_qp_info *qp_info, 2097 const struct ib_wc *wc, 2098 int port_num, 2099 struct ib_mad_private *recv, 2100 struct ib_mad_private *response) 2101 { 2102 enum smi_forward_action retsmi; 2103 struct ib_smp *smp = (struct ib_smp *)recv->mad; 2104 2105 trace_ib_mad_handle_ib_smi(smp); 2106 2107 if (smi_handle_dr_smp_recv(smp, 2108 rdma_cap_ib_switch(port_priv->device), 2109 port_num, 2110 port_priv->device->phys_port_cnt) == 2111 IB_SMI_DISCARD) 2112 return IB_SMI_DISCARD; 2113 2114 retsmi = smi_check_forward_dr_smp(smp); 2115 if (retsmi == IB_SMI_LOCAL) 2116 return IB_SMI_HANDLE; 2117 2118 if (retsmi == IB_SMI_SEND) { /* don't forward */ 
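		/*
		 * The directed-route SMP is consumed at this node: adjust the
		 * hop state for the send direction and confirm it has reached
		 * the end of its route before it is processed locally.
		 */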
2119 if (smi_handle_dr_smp_send(smp, 2120 rdma_cap_ib_switch(port_priv->device), 2121 port_num) == IB_SMI_DISCARD) 2122 return IB_SMI_DISCARD; 2123 2124 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) 2125 return IB_SMI_DISCARD; 2126 } else if (rdma_cap_ib_switch(port_priv->device)) { 2127 /* forward case for switches */ 2128 memcpy(response, recv, mad_priv_size(response)); 2129 response->header.recv_wc.wc = &response->header.wc; 2130 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; 2131 response->header.recv_wc.recv_buf.grh = &response->grh; 2132 2133 agent_send_response((const struct ib_mad_hdr *)response->mad, 2134 &response->grh, wc, 2135 port_priv->device, 2136 smi_get_fwd_port(smp), 2137 qp_info->qp->qp_num, 2138 response->mad_size, 2139 false); 2140 2141 return IB_SMI_DISCARD; 2142 } 2143 return IB_SMI_HANDLE; 2144 } 2145 2146 static bool generate_unmatched_resp(const struct ib_mad_private *recv, 2147 struct ib_mad_private *response, 2148 size_t *resp_len, bool opa) 2149 { 2150 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad; 2151 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad; 2152 2153 if (recv_hdr->method == IB_MGMT_METHOD_GET || 2154 recv_hdr->method == IB_MGMT_METHOD_SET) { 2155 memcpy(response, recv, mad_priv_size(response)); 2156 response->header.recv_wc.wc = &response->header.wc; 2157 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; 2158 response->header.recv_wc.recv_buf.grh = &response->grh; 2159 resp_hdr->method = IB_MGMT_METHOD_GET_RESP; 2160 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); 2161 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 2162 resp_hdr->status |= IB_SMP_DIRECTION; 2163 2164 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) { 2165 if (recv_hdr->mgmt_class == 2166 IB_MGMT_CLASS_SUBN_LID_ROUTED || 2167 recv_hdr->mgmt_class == 2168 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 2169 *resp_len = opa_get_smp_header_size( 2170 (struct opa_smp *)recv->mad); 2171 else 2172 *resp_len = sizeof(struct ib_mad_hdr); 2173 } 2174 2175 return true; 2176 } else { 2177 return false; 2178 } 2179 } 2180 2181 static enum smi_action 2182 handle_opa_smi(struct ib_mad_port_private *port_priv, 2183 struct ib_mad_qp_info *qp_info, 2184 struct ib_wc *wc, 2185 int port_num, 2186 struct ib_mad_private *recv, 2187 struct ib_mad_private *response) 2188 { 2189 enum smi_forward_action retsmi; 2190 struct opa_smp *smp = (struct opa_smp *)recv->mad; 2191 2192 trace_ib_mad_handle_opa_smi(smp); 2193 2194 if (opa_smi_handle_dr_smp_recv(smp, 2195 rdma_cap_ib_switch(port_priv->device), 2196 port_num, 2197 port_priv->device->phys_port_cnt) == 2198 IB_SMI_DISCARD) 2199 return IB_SMI_DISCARD; 2200 2201 retsmi = opa_smi_check_forward_dr_smp(smp); 2202 if (retsmi == IB_SMI_LOCAL) 2203 return IB_SMI_HANDLE; 2204 2205 if (retsmi == IB_SMI_SEND) { /* don't forward */ 2206 if (opa_smi_handle_dr_smp_send(smp, 2207 rdma_cap_ib_switch(port_priv->device), 2208 port_num) == IB_SMI_DISCARD) 2209 return IB_SMI_DISCARD; 2210 2211 if (opa_smi_check_local_smp(smp, port_priv->device) == 2212 IB_SMI_DISCARD) 2213 return IB_SMI_DISCARD; 2214 2215 } else if (rdma_cap_ib_switch(port_priv->device)) { 2216 /* forward case for switches */ 2217 memcpy(response, recv, mad_priv_size(response)); 2218 response->header.recv_wc.wc = &response->header.wc; 2219 response->header.recv_wc.recv_buf.opa_mad = 2220 (struct opa_mad *)response->mad; 2221 
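		/*
		 * The copy, not the original, is what gets retransmitted out of
		 * the forwarding port; returning IB_SMI_DISCARD below keeps the
		 * caller from also processing this MAD locally.
		 */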
response->header.recv_wc.recv_buf.grh = &response->grh; 2222 2223 agent_send_response((const struct ib_mad_hdr *)response->mad, 2224 &response->grh, wc, 2225 port_priv->device, 2226 opa_smi_get_fwd_port(smp), 2227 qp_info->qp->qp_num, 2228 recv->header.wc.byte_len, 2229 true); 2230 2231 return IB_SMI_DISCARD; 2232 } 2233 2234 return IB_SMI_HANDLE; 2235 } 2236 2237 static enum smi_action 2238 handle_smi(struct ib_mad_port_private *port_priv, 2239 struct ib_mad_qp_info *qp_info, 2240 struct ib_wc *wc, 2241 int port_num, 2242 struct ib_mad_private *recv, 2243 struct ib_mad_private *response, 2244 bool opa) 2245 { 2246 struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad; 2247 2248 if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION && 2249 mad_hdr->class_version == OPA_SM_CLASS_VERSION) 2250 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv, 2251 response); 2252 2253 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response); 2254 } 2255 2256 static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc) 2257 { 2258 struct ib_mad_port_private *port_priv = cq->cq_context; 2259 struct ib_mad_list_head *mad_list = 2260 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); 2261 struct ib_mad_qp_info *qp_info; 2262 struct ib_mad_private_header *mad_priv_hdr; 2263 struct ib_mad_private *recv, *response = NULL; 2264 struct ib_mad_agent_private *mad_agent; 2265 int port_num; 2266 int ret = IB_MAD_RESULT_SUCCESS; 2267 size_t mad_size; 2268 u16 resp_mad_pkey_index = 0; 2269 bool opa; 2270 2271 if (list_empty_careful(&port_priv->port_list)) 2272 return; 2273 2274 if (wc->status != IB_WC_SUCCESS) { 2275 /* 2276 * Receive errors indicate that the QP has entered the error 2277 * state - error handling/shutdown code will cleanup 2278 */ 2279 return; 2280 } 2281 2282 qp_info = mad_list->mad_queue->qp_info; 2283 dequeue_mad(mad_list); 2284 2285 opa = rdma_cap_opa_mad(qp_info->port_priv->device, 2286 qp_info->port_priv->port_num); 2287 2288 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, 2289 mad_list); 2290 recv = container_of(mad_priv_hdr, struct ib_mad_private, header); 2291 ib_dma_unmap_single(port_priv->device, 2292 recv->header.mapping, 2293 mad_priv_dma_size(recv), 2294 DMA_FROM_DEVICE); 2295 2296 /* Setup MAD receive work completion from "normal" work completion */ 2297 recv->header.wc = *wc; 2298 recv->header.recv_wc.wc = &recv->header.wc; 2299 2300 if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) { 2301 recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh); 2302 recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); 2303 } else { 2304 recv->header.recv_wc.mad_len = sizeof(struct ib_mad); 2305 recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); 2306 } 2307 2308 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad; 2309 recv->header.recv_wc.recv_buf.grh = &recv->grh; 2310 2311 if (atomic_read(&qp_info->snoop_count)) 2312 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS); 2313 2314 /* Validate MAD */ 2315 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa)) 2316 goto out; 2317 2318 trace_ib_mad_recv_done_handler(qp_info, wc, 2319 (struct ib_mad_hdr *)recv->mad); 2320 2321 mad_size = recv->mad_size; 2322 response = alloc_mad_private(mad_size, GFP_KERNEL); 2323 if (!response) 2324 goto out; 2325 2326 if (rdma_cap_ib_switch(port_priv->device)) 2327 port_num = wc->port_num; 2328 else 2329 port_num = port_priv->port_num; 2330 2331 if (((struct 
ib_mad_hdr *)recv->mad)->mgmt_class == 2332 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 2333 if (handle_smi(port_priv, qp_info, wc, port_num, recv, 2334 response, opa) 2335 == IB_SMI_DISCARD) 2336 goto out; 2337 } 2338 2339 /* Give driver "right of first refusal" on incoming MAD */ 2340 if (port_priv->device->ops.process_mad) { 2341 ret = port_priv->device->ops.process_mad( 2342 port_priv->device, 0, port_priv->port_num, wc, 2343 &recv->grh, (const struct ib_mad_hdr *)recv->mad, 2344 recv->mad_size, (struct ib_mad_hdr *)response->mad, 2345 &mad_size, &resp_mad_pkey_index); 2346 2347 if (opa) 2348 wc->pkey_index = resp_mad_pkey_index; 2349 2350 if (ret & IB_MAD_RESULT_SUCCESS) { 2351 if (ret & IB_MAD_RESULT_CONSUMED) 2352 goto out; 2353 if (ret & IB_MAD_RESULT_REPLY) { 2354 agent_send_response((const struct ib_mad_hdr *)response->mad, 2355 &recv->grh, wc, 2356 port_priv->device, 2357 port_num, 2358 qp_info->qp->qp_num, 2359 mad_size, opa); 2360 goto out; 2361 } 2362 } 2363 } 2364 2365 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad); 2366 if (mad_agent) { 2367 trace_ib_mad_recv_done_agent(mad_agent); 2368 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc); 2369 /* 2370 * recv is freed up in error cases in ib_mad_complete_recv 2371 * or via recv_handler in ib_mad_complete_recv() 2372 */ 2373 recv = NULL; 2374 } else if ((ret & IB_MAD_RESULT_SUCCESS) && 2375 generate_unmatched_resp(recv, response, &mad_size, opa)) { 2376 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc, 2377 port_priv->device, port_num, 2378 qp_info->qp->qp_num, mad_size, opa); 2379 } 2380 2381 out: 2382 /* Post another receive request for this QP */ 2383 if (response) { 2384 ib_mad_post_receive_mads(qp_info, response); 2385 kfree(recv); 2386 } else 2387 ib_mad_post_receive_mads(qp_info, recv); 2388 } 2389 2390 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv) 2391 { 2392 struct ib_mad_send_wr_private *mad_send_wr; 2393 unsigned long delay; 2394 2395 if (list_empty(&mad_agent_priv->wait_list)) { 2396 cancel_delayed_work(&mad_agent_priv->timed_work); 2397 } else { 2398 mad_send_wr = list_entry(mad_agent_priv->wait_list.next, 2399 struct ib_mad_send_wr_private, 2400 agent_list); 2401 2402 if (time_after(mad_agent_priv->timeout, 2403 mad_send_wr->timeout)) { 2404 mad_agent_priv->timeout = mad_send_wr->timeout; 2405 delay = mad_send_wr->timeout - jiffies; 2406 if ((long)delay <= 0) 2407 delay = 1; 2408 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, 2409 &mad_agent_priv->timed_work, delay); 2410 } 2411 } 2412 } 2413 2414 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr) 2415 { 2416 struct ib_mad_agent_private *mad_agent_priv; 2417 struct ib_mad_send_wr_private *temp_mad_send_wr; 2418 struct list_head *list_item; 2419 unsigned long delay; 2420 2421 mad_agent_priv = mad_send_wr->mad_agent_priv; 2422 list_del(&mad_send_wr->agent_list); 2423 2424 delay = mad_send_wr->timeout; 2425 mad_send_wr->timeout += jiffies; 2426 2427 if (delay) { 2428 list_for_each_prev(list_item, &mad_agent_priv->wait_list) { 2429 temp_mad_send_wr = list_entry(list_item, 2430 struct ib_mad_send_wr_private, 2431 agent_list); 2432 if (time_after(mad_send_wr->timeout, 2433 temp_mad_send_wr->timeout)) 2434 break; 2435 } 2436 } 2437 else 2438 list_item = &mad_agent_priv->wait_list; 2439 list_add(&mad_send_wr->agent_list, list_item); 2440 2441 /* Reschedule a work item if we have a shorter timeout */ 2442 if (mad_agent_priv->wait_list.next == 
&mad_send_wr->agent_list) 2443 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, 2444 &mad_agent_priv->timed_work, delay); 2445 } 2446 2447 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr, 2448 unsigned long timeout_ms) 2449 { 2450 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); 2451 wait_for_response(mad_send_wr); 2452 } 2453 2454 /* 2455 * Process a send work completion 2456 */ 2457 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, 2458 struct ib_mad_send_wc *mad_send_wc) 2459 { 2460 struct ib_mad_agent_private *mad_agent_priv; 2461 unsigned long flags; 2462 int ret; 2463 2464 mad_agent_priv = mad_send_wr->mad_agent_priv; 2465 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2466 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { 2467 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc); 2468 if (ret == IB_RMPP_RESULT_CONSUMED) 2469 goto done; 2470 } else 2471 ret = IB_RMPP_RESULT_UNHANDLED; 2472 2473 if (mad_send_wc->status != IB_WC_SUCCESS && 2474 mad_send_wr->status == IB_WC_SUCCESS) { 2475 mad_send_wr->status = mad_send_wc->status; 2476 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2477 } 2478 2479 if (--mad_send_wr->refcount > 0) { 2480 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout && 2481 mad_send_wr->status == IB_WC_SUCCESS) { 2482 wait_for_response(mad_send_wr); 2483 } 2484 goto done; 2485 } 2486 2487 /* Remove send from MAD agent and notify client of completion */ 2488 list_del(&mad_send_wr->agent_list); 2489 adjust_timeout(mad_agent_priv); 2490 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2491 2492 if (mad_send_wr->status != IB_WC_SUCCESS ) 2493 mad_send_wc->status = mad_send_wr->status; 2494 if (ret == IB_RMPP_RESULT_INTERNAL) 2495 ib_rmpp_send_handler(mad_send_wc); 2496 else 2497 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2498 mad_send_wc); 2499 2500 /* Release reference on agent taken when sending */ 2501 deref_mad_agent(mad_agent_priv); 2502 return; 2503 done: 2504 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2505 } 2506 2507 static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc) 2508 { 2509 struct ib_mad_port_private *port_priv = cq->cq_context; 2510 struct ib_mad_list_head *mad_list = 2511 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); 2512 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr; 2513 struct ib_mad_qp_info *qp_info; 2514 struct ib_mad_queue *send_queue; 2515 struct ib_mad_send_wc mad_send_wc; 2516 unsigned long flags; 2517 int ret; 2518 2519 if (list_empty_careful(&port_priv->port_list)) 2520 return; 2521 2522 if (wc->status != IB_WC_SUCCESS) { 2523 if (!ib_mad_send_error(port_priv, wc)) 2524 return; 2525 } 2526 2527 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, 2528 mad_list); 2529 send_queue = mad_list->mad_queue; 2530 qp_info = send_queue->qp_info; 2531 2532 trace_ib_mad_send_done_agent(mad_send_wr->mad_agent_priv); 2533 trace_ib_mad_send_done_handler(mad_send_wr, wc); 2534 2535 retry: 2536 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, 2537 mad_send_wr->header_mapping, 2538 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE); 2539 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, 2540 mad_send_wr->payload_mapping, 2541 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE); 2542 queued_send_wr = NULL; 2543 spin_lock_irqsave(&send_queue->lock, flags); 2544 list_del(&mad_list->list); 2545 2546 /* Move queued send to the send queue */ 2547 if (send_queue->count-- > 
send_queue->max_active) { 2548 mad_list = container_of(qp_info->overflow_list.next, 2549 struct ib_mad_list_head, list); 2550 queued_send_wr = container_of(mad_list, 2551 struct ib_mad_send_wr_private, 2552 mad_list); 2553 list_move_tail(&mad_list->list, &send_queue->list); 2554 } 2555 spin_unlock_irqrestore(&send_queue->lock, flags); 2556 2557 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2558 mad_send_wc.status = wc->status; 2559 mad_send_wc.vendor_err = wc->vendor_err; 2560 if (atomic_read(&qp_info->snoop_count)) 2561 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc, 2562 IB_MAD_SNOOP_SEND_COMPLETIONS); 2563 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); 2564 2565 if (queued_send_wr) { 2566 trace_ib_mad_send_done_resend(queued_send_wr, qp_info); 2567 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr, 2568 NULL); 2569 if (ret) { 2570 dev_err(&port_priv->device->dev, 2571 "ib_post_send failed: %d\n", ret); 2572 mad_send_wr = queued_send_wr; 2573 wc->status = IB_WC_LOC_QP_OP_ERR; 2574 goto retry; 2575 } 2576 } 2577 } 2578 2579 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info) 2580 { 2581 struct ib_mad_send_wr_private *mad_send_wr; 2582 struct ib_mad_list_head *mad_list; 2583 unsigned long flags; 2584 2585 spin_lock_irqsave(&qp_info->send_queue.lock, flags); 2586 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) { 2587 mad_send_wr = container_of(mad_list, 2588 struct ib_mad_send_wr_private, 2589 mad_list); 2590 mad_send_wr->retry = 1; 2591 } 2592 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); 2593 } 2594 2595 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv, 2596 struct ib_wc *wc) 2597 { 2598 struct ib_mad_list_head *mad_list = 2599 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); 2600 struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info; 2601 struct ib_mad_send_wr_private *mad_send_wr; 2602 int ret; 2603 2604 /* 2605 * Send errors will transition the QP to SQE - move 2606 * QP to RTS and repost flushed work requests 2607 */ 2608 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, 2609 mad_list); 2610 if (wc->status == IB_WC_WR_FLUSH_ERR) { 2611 if (mad_send_wr->retry) { 2612 /* Repost send */ 2613 mad_send_wr->retry = 0; 2614 trace_ib_mad_error_handler(mad_send_wr, qp_info); 2615 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr, 2616 NULL); 2617 if (!ret) 2618 return false; 2619 } 2620 } else { 2621 struct ib_qp_attr *attr; 2622 2623 /* Transition QP to RTS and fail offending send */ 2624 attr = kmalloc(sizeof *attr, GFP_KERNEL); 2625 if (attr) { 2626 attr->qp_state = IB_QPS_RTS; 2627 attr->cur_qp_state = IB_QPS_SQE; 2628 ret = ib_modify_qp(qp_info->qp, attr, 2629 IB_QP_STATE | IB_QP_CUR_STATE); 2630 kfree(attr); 2631 if (ret) 2632 dev_err(&port_priv->device->dev, 2633 "%s - ib_modify_qp to RTS: %d\n", 2634 __func__, ret); 2635 else 2636 mark_sends_for_retry(qp_info); 2637 } 2638 } 2639 2640 return true; 2641 } 2642 2643 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv) 2644 { 2645 unsigned long flags; 2646 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr; 2647 struct ib_mad_send_wc mad_send_wc; 2648 struct list_head cancel_list; 2649 2650 INIT_LIST_HEAD(&cancel_list); 2651 2652 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2653 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, 2654 &mad_agent_priv->send_list, agent_list) { 2655 if (mad_send_wr->status == IB_WC_SUCCESS) { 2656 mad_send_wr->status = IB_WC_WR_FLUSH_ERR; 2657 
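			/*
			 * Sends that expect a response hold an extra reference;
			 * drop it so the request completes as soon as its send
			 * completion arrives.
			 */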
mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2658 } 2659 } 2660 2661 /* Empty wait list to prevent receives from finding a request */ 2662 list_splice_init(&mad_agent_priv->wait_list, &cancel_list); 2663 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2664 2665 /* Report all cancelled requests */ 2666 mad_send_wc.status = IB_WC_WR_FLUSH_ERR; 2667 mad_send_wc.vendor_err = 0; 2668 2669 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, 2670 &cancel_list, agent_list) { 2671 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2672 list_del(&mad_send_wr->agent_list); 2673 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2674 &mad_send_wc); 2675 atomic_dec(&mad_agent_priv->refcount); 2676 } 2677 } 2678 2679 static struct ib_mad_send_wr_private* 2680 find_send_wr(struct ib_mad_agent_private *mad_agent_priv, 2681 struct ib_mad_send_buf *send_buf) 2682 { 2683 struct ib_mad_send_wr_private *mad_send_wr; 2684 2685 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, 2686 agent_list) { 2687 if (&mad_send_wr->send_buf == send_buf) 2688 return mad_send_wr; 2689 } 2690 2691 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, 2692 agent_list) { 2693 if (is_rmpp_data_mad(mad_agent_priv, 2694 mad_send_wr->send_buf.mad) && 2695 &mad_send_wr->send_buf == send_buf) 2696 return mad_send_wr; 2697 } 2698 return NULL; 2699 } 2700 2701 int ib_modify_mad(struct ib_mad_agent *mad_agent, 2702 struct ib_mad_send_buf *send_buf, u32 timeout_ms) 2703 { 2704 struct ib_mad_agent_private *mad_agent_priv; 2705 struct ib_mad_send_wr_private *mad_send_wr; 2706 unsigned long flags; 2707 int active; 2708 2709 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, 2710 agent); 2711 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2712 mad_send_wr = find_send_wr(mad_agent_priv, send_buf); 2713 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) { 2714 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2715 return -EINVAL; 2716 } 2717 2718 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1); 2719 if (!timeout_ms) { 2720 mad_send_wr->status = IB_WC_WR_FLUSH_ERR; 2721 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2722 } 2723 2724 mad_send_wr->send_buf.timeout_ms = timeout_ms; 2725 if (active) 2726 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); 2727 else 2728 ib_reset_mad_timeout(mad_send_wr, timeout_ms); 2729 2730 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2731 return 0; 2732 } 2733 EXPORT_SYMBOL(ib_modify_mad); 2734 2735 void ib_cancel_mad(struct ib_mad_agent *mad_agent, 2736 struct ib_mad_send_buf *send_buf) 2737 { 2738 ib_modify_mad(mad_agent, send_buf, 0); 2739 } 2740 EXPORT_SYMBOL(ib_cancel_mad); 2741 2742 static void local_completions(struct work_struct *work) 2743 { 2744 struct ib_mad_agent_private *mad_agent_priv; 2745 struct ib_mad_local_private *local; 2746 struct ib_mad_agent_private *recv_mad_agent; 2747 unsigned long flags; 2748 int free_mad; 2749 struct ib_wc wc; 2750 struct ib_mad_send_wc mad_send_wc; 2751 bool opa; 2752 2753 mad_agent_priv = 2754 container_of(work, struct ib_mad_agent_private, local_work); 2755 2756 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, 2757 mad_agent_priv->qp_info->port_priv->port_num); 2758 2759 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2760 while (!list_empty(&mad_agent_priv->local_list)) { 2761 local = list_entry(mad_agent_priv->local_list.next, 2762 struct ib_mad_local_private, 2763 completion_list); 2764 list_del(&local->completion_list); 2765 
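		/*
		 * The recv/send handlers invoked below may call back into the
		 * MAD layer, so drop the agent lock while this entry is
		 * processed.
		 */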
spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2766 free_mad = 0; 2767 if (local->mad_priv) { 2768 u8 base_version; 2769 recv_mad_agent = local->recv_mad_agent; 2770 if (!recv_mad_agent) { 2771 dev_err(&mad_agent_priv->agent.device->dev, 2772 "No receive MAD agent for local completion\n"); 2773 free_mad = 1; 2774 goto local_send_completion; 2775 } 2776 2777 /* 2778 * Defined behavior is to complete response 2779 * before request 2780 */ 2781 build_smp_wc(recv_mad_agent->agent.qp, 2782 local->mad_send_wr->send_wr.wr.wr_cqe, 2783 be16_to_cpu(IB_LID_PERMISSIVE), 2784 local->mad_send_wr->send_wr.pkey_index, 2785 recv_mad_agent->agent.port_num, &wc); 2786 2787 local->mad_priv->header.recv_wc.wc = &wc; 2788 2789 base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version; 2790 if (opa && base_version == OPA_MGMT_BASE_VERSION) { 2791 local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len; 2792 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); 2793 } else { 2794 local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad); 2795 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); 2796 } 2797 2798 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list); 2799 list_add(&local->mad_priv->header.recv_wc.recv_buf.list, 2800 &local->mad_priv->header.recv_wc.rmpp_list); 2801 local->mad_priv->header.recv_wc.recv_buf.grh = NULL; 2802 local->mad_priv->header.recv_wc.recv_buf.mad = 2803 (struct ib_mad *)local->mad_priv->mad; 2804 if (atomic_read(&recv_mad_agent->qp_info->snoop_count)) 2805 snoop_recv(recv_mad_agent->qp_info, 2806 &local->mad_priv->header.recv_wc, 2807 IB_MAD_SNOOP_RECVS); 2808 recv_mad_agent->agent.recv_handler( 2809 &recv_mad_agent->agent, 2810 &local->mad_send_wr->send_buf, 2811 &local->mad_priv->header.recv_wc); 2812 spin_lock_irqsave(&recv_mad_agent->lock, flags); 2813 atomic_dec(&recv_mad_agent->refcount); 2814 spin_unlock_irqrestore(&recv_mad_agent->lock, flags); 2815 } 2816 2817 local_send_completion: 2818 /* Complete send */ 2819 mad_send_wc.status = IB_WC_SUCCESS; 2820 mad_send_wc.vendor_err = 0; 2821 mad_send_wc.send_buf = &local->mad_send_wr->send_buf; 2822 if (atomic_read(&mad_agent_priv->qp_info->snoop_count)) 2823 snoop_send(mad_agent_priv->qp_info, 2824 &local->mad_send_wr->send_buf, 2825 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS); 2826 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2827 &mad_send_wc); 2828 2829 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2830 atomic_dec(&mad_agent_priv->refcount); 2831 if (free_mad) 2832 kfree(local->mad_priv); 2833 kfree(local); 2834 } 2835 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2836 } 2837 2838 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) 2839 { 2840 int ret; 2841 2842 if (!mad_send_wr->retries_left) 2843 return -ETIMEDOUT; 2844 2845 mad_send_wr->retries_left--; 2846 mad_send_wr->send_buf.retries++; 2847 2848 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); 2849 2850 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) { 2851 ret = ib_retry_rmpp(mad_send_wr); 2852 switch (ret) { 2853 case IB_RMPP_RESULT_UNHANDLED: 2854 ret = ib_send_mad(mad_send_wr); 2855 break; 2856 case IB_RMPP_RESULT_CONSUMED: 2857 ret = 0; 2858 break; 2859 default: 2860 ret = -ECOMM; 2861 break; 2862 } 2863 } else 2864 ret = ib_send_mad(mad_send_wr); 2865 2866 if (!ret) { 2867 mad_send_wr->refcount++; 2868 list_add_tail(&mad_send_wr->agent_list, 2869 &mad_send_wr->mad_agent_priv->send_list); 2870 } 
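	/*
	 * A zero return means the MAD was reposted and is back on the send
	 * list, so the caller continues to treat it as outstanding.
	 */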
2871 return ret; 2872 } 2873 2874 static void timeout_sends(struct work_struct *work) 2875 { 2876 struct ib_mad_agent_private *mad_agent_priv; 2877 struct ib_mad_send_wr_private *mad_send_wr; 2878 struct ib_mad_send_wc mad_send_wc; 2879 unsigned long flags, delay; 2880 2881 mad_agent_priv = container_of(work, struct ib_mad_agent_private, 2882 timed_work.work); 2883 mad_send_wc.vendor_err = 0; 2884 2885 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2886 while (!list_empty(&mad_agent_priv->wait_list)) { 2887 mad_send_wr = list_entry(mad_agent_priv->wait_list.next, 2888 struct ib_mad_send_wr_private, 2889 agent_list); 2890 2891 if (time_after(mad_send_wr->timeout, jiffies)) { 2892 delay = mad_send_wr->timeout - jiffies; 2893 if ((long)delay <= 0) 2894 delay = 1; 2895 queue_delayed_work(mad_agent_priv->qp_info-> 2896 port_priv->wq, 2897 &mad_agent_priv->timed_work, delay); 2898 break; 2899 } 2900 2901 list_del(&mad_send_wr->agent_list); 2902 if (mad_send_wr->status == IB_WC_SUCCESS && 2903 !retry_send(mad_send_wr)) 2904 continue; 2905 2906 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2907 2908 if (mad_send_wr->status == IB_WC_SUCCESS) 2909 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR; 2910 else 2911 mad_send_wc.status = mad_send_wr->status; 2912 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2913 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2914 &mad_send_wc); 2915 2916 atomic_dec(&mad_agent_priv->refcount); 2917 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2918 } 2919 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2920 } 2921 2922 /* 2923 * Allocate receive MADs and post receive WRs for them 2924 */ 2925 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, 2926 struct ib_mad_private *mad) 2927 { 2928 unsigned long flags; 2929 int post, ret; 2930 struct ib_mad_private *mad_priv; 2931 struct ib_sge sg_list; 2932 struct ib_recv_wr recv_wr; 2933 struct ib_mad_queue *recv_queue = &qp_info->recv_queue; 2934 2935 /* Initialize common scatter list fields */ 2936 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey; 2937 2938 /* Initialize common receive WR fields */ 2939 recv_wr.next = NULL; 2940 recv_wr.sg_list = &sg_list; 2941 recv_wr.num_sge = 1; 2942 2943 do { 2944 /* Allocate and map receive buffer */ 2945 if (mad) { 2946 mad_priv = mad; 2947 mad = NULL; 2948 } else { 2949 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv), 2950 GFP_ATOMIC); 2951 if (!mad_priv) { 2952 ret = -ENOMEM; 2953 break; 2954 } 2955 } 2956 sg_list.length = mad_priv_dma_size(mad_priv); 2957 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, 2958 &mad_priv->grh, 2959 mad_priv_dma_size(mad_priv), 2960 DMA_FROM_DEVICE); 2961 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, 2962 sg_list.addr))) { 2963 ret = -ENOMEM; 2964 break; 2965 } 2966 mad_priv->header.mapping = sg_list.addr; 2967 mad_priv->header.mad_list.mad_queue = recv_queue; 2968 mad_priv->header.mad_list.cqe.done = ib_mad_recv_done; 2969 recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe; 2970 2971 /* Post receive WR */ 2972 spin_lock_irqsave(&recv_queue->lock, flags); 2973 post = (++recv_queue->count < recv_queue->max_active); 2974 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list); 2975 spin_unlock_irqrestore(&recv_queue->lock, flags); 2976 ret = ib_post_recv(qp_info->qp, &recv_wr, NULL); 2977 if (ret) { 2978 spin_lock_irqsave(&recv_queue->lock, flags); 2979 list_del(&mad_priv->header.mad_list.list); 2980 recv_queue->count--; 2981 
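			/*
			 * ib_post_recv() failed: back the buffer out of the
			 * receive queue accounting, unmap and free it, and stop
			 * refilling.
			 */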
spin_unlock_irqrestore(&recv_queue->lock, flags); 2982 ib_dma_unmap_single(qp_info->port_priv->device, 2983 mad_priv->header.mapping, 2984 mad_priv_dma_size(mad_priv), 2985 DMA_FROM_DEVICE); 2986 kfree(mad_priv); 2987 dev_err(&qp_info->port_priv->device->dev, 2988 "ib_post_recv failed: %d\n", ret); 2989 break; 2990 } 2991 } while (post); 2992 2993 return ret; 2994 } 2995 2996 /* 2997 * Return all the posted receive MADs 2998 */ 2999 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info) 3000 { 3001 struct ib_mad_private_header *mad_priv_hdr; 3002 struct ib_mad_private *recv; 3003 struct ib_mad_list_head *mad_list; 3004 3005 if (!qp_info->qp) 3006 return; 3007 3008 while (!list_empty(&qp_info->recv_queue.list)) { 3009 3010 mad_list = list_entry(qp_info->recv_queue.list.next, 3011 struct ib_mad_list_head, list); 3012 mad_priv_hdr = container_of(mad_list, 3013 struct ib_mad_private_header, 3014 mad_list); 3015 recv = container_of(mad_priv_hdr, struct ib_mad_private, 3016 header); 3017 3018 /* Remove from posted receive MAD list */ 3019 list_del(&mad_list->list); 3020 3021 ib_dma_unmap_single(qp_info->port_priv->device, 3022 recv->header.mapping, 3023 mad_priv_dma_size(recv), 3024 DMA_FROM_DEVICE); 3025 kfree(recv); 3026 } 3027 3028 qp_info->recv_queue.count = 0; 3029 } 3030 3031 /* 3032 * Start the port 3033 */ 3034 static int ib_mad_port_start(struct ib_mad_port_private *port_priv) 3035 { 3036 int ret, i; 3037 struct ib_qp_attr *attr; 3038 struct ib_qp *qp; 3039 u16 pkey_index; 3040 3041 attr = kmalloc(sizeof *attr, GFP_KERNEL); 3042 if (!attr) 3043 return -ENOMEM; 3044 3045 ret = ib_find_pkey(port_priv->device, port_priv->port_num, 3046 IB_DEFAULT_PKEY_FULL, &pkey_index); 3047 if (ret) 3048 pkey_index = 0; 3049 3050 for (i = 0; i < IB_MAD_QPS_CORE; i++) { 3051 qp = port_priv->qp_info[i].qp; 3052 if (!qp) 3053 continue; 3054 3055 /* 3056 * PKey index for QP1 is irrelevant but 3057 * one is needed for the Reset to Init transition 3058 */ 3059 attr->qp_state = IB_QPS_INIT; 3060 attr->pkey_index = pkey_index; 3061 attr->qkey = (qp->qp_num == 0) ? 
0 : IB_QP1_QKEY; 3062 ret = ib_modify_qp(qp, attr, IB_QP_STATE | 3063 IB_QP_PKEY_INDEX | IB_QP_QKEY); 3064 if (ret) { 3065 dev_err(&port_priv->device->dev, 3066 "Couldn't change QP%d state to INIT: %d\n", 3067 i, ret); 3068 goto out; 3069 } 3070 3071 attr->qp_state = IB_QPS_RTR; 3072 ret = ib_modify_qp(qp, attr, IB_QP_STATE); 3073 if (ret) { 3074 dev_err(&port_priv->device->dev, 3075 "Couldn't change QP%d state to RTR: %d\n", 3076 i, ret); 3077 goto out; 3078 } 3079 3080 attr->qp_state = IB_QPS_RTS; 3081 attr->sq_psn = IB_MAD_SEND_Q_PSN; 3082 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN); 3083 if (ret) { 3084 dev_err(&port_priv->device->dev, 3085 "Couldn't change QP%d state to RTS: %d\n", 3086 i, ret); 3087 goto out; 3088 } 3089 } 3090 3091 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); 3092 if (ret) { 3093 dev_err(&port_priv->device->dev, 3094 "Failed to request completion notification: %d\n", 3095 ret); 3096 goto out; 3097 } 3098 3099 for (i = 0; i < IB_MAD_QPS_CORE; i++) { 3100 if (!port_priv->qp_info[i].qp) 3101 continue; 3102 3103 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); 3104 if (ret) { 3105 dev_err(&port_priv->device->dev, 3106 "Couldn't post receive WRs\n"); 3107 goto out; 3108 } 3109 } 3110 out: 3111 kfree(attr); 3112 return ret; 3113 } 3114 3115 static void qp_event_handler(struct ib_event *event, void *qp_context) 3116 { 3117 struct ib_mad_qp_info *qp_info = qp_context; 3118 3119 /* It's worse than that! He's dead, Jim! */ 3120 dev_err(&qp_info->port_priv->device->dev, 3121 "Fatal error (%d) on MAD QP (%d)\n", 3122 event->event, qp_info->qp->qp_num); 3123 } 3124 3125 static void init_mad_queue(struct ib_mad_qp_info *qp_info, 3126 struct ib_mad_queue *mad_queue) 3127 { 3128 mad_queue->qp_info = qp_info; 3129 mad_queue->count = 0; 3130 spin_lock_init(&mad_queue->lock); 3131 INIT_LIST_HEAD(&mad_queue->list); 3132 } 3133 3134 static void init_mad_qp(struct ib_mad_port_private *port_priv, 3135 struct ib_mad_qp_info *qp_info) 3136 { 3137 qp_info->port_priv = port_priv; 3138 init_mad_queue(qp_info, &qp_info->send_queue); 3139 init_mad_queue(qp_info, &qp_info->recv_queue); 3140 INIT_LIST_HEAD(&qp_info->overflow_list); 3141 spin_lock_init(&qp_info->snoop_lock); 3142 qp_info->snoop_table = NULL; 3143 qp_info->snoop_table_size = 0; 3144 atomic_set(&qp_info->snoop_count, 0); 3145 } 3146 3147 static int create_mad_qp(struct ib_mad_qp_info *qp_info, 3148 enum ib_qp_type qp_type) 3149 { 3150 struct ib_qp_init_attr qp_init_attr; 3151 int ret; 3152 3153 memset(&qp_init_attr, 0, sizeof qp_init_attr); 3154 qp_init_attr.send_cq = qp_info->port_priv->cq; 3155 qp_init_attr.recv_cq = qp_info->port_priv->cq; 3156 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; 3157 qp_init_attr.cap.max_send_wr = mad_sendq_size; 3158 qp_init_attr.cap.max_recv_wr = mad_recvq_size; 3159 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG; 3160 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG; 3161 qp_init_attr.qp_type = qp_type; 3162 qp_init_attr.port_num = qp_info->port_priv->port_num; 3163 qp_init_attr.qp_context = qp_info; 3164 qp_init_attr.event_handler = qp_event_handler; 3165 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); 3166 if (IS_ERR(qp_info->qp)) { 3167 dev_err(&qp_info->port_priv->device->dev, 3168 "Couldn't create ib_mad QP%d\n", 3169 get_spl_qp_index(qp_type)); 3170 ret = PTR_ERR(qp_info->qp); 3171 goto error; 3172 } 3173 /* Use minimum queue sizes unless the CQ is resized */ 3174 qp_info->send_queue.max_active = mad_sendq_size; 3175 
qp_info->recv_queue.max_active = mad_recvq_size; 3176 return 0; 3177 3178 error: 3179 return ret; 3180 } 3181 3182 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info) 3183 { 3184 if (!qp_info->qp) 3185 return; 3186 3187 ib_destroy_qp(qp_info->qp); 3188 kfree(qp_info->snoop_table); 3189 } 3190 3191 /* 3192 * Open the port 3193 * Create the QP, PD, MR, and CQ if needed 3194 */ 3195 static int ib_mad_port_open(struct ib_device *device, 3196 int port_num) 3197 { 3198 int ret, cq_size; 3199 struct ib_mad_port_private *port_priv; 3200 unsigned long flags; 3201 char name[sizeof "ib_mad123"]; 3202 int has_smi; 3203 3204 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE)) 3205 return -EFAULT; 3206 3207 if (WARN_ON(rdma_cap_opa_mad(device, port_num) && 3208 rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE)) 3209 return -EFAULT; 3210 3211 /* Create new device info */ 3212 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); 3213 if (!port_priv) 3214 return -ENOMEM; 3215 3216 port_priv->device = device; 3217 port_priv->port_num = port_num; 3218 spin_lock_init(&port_priv->reg_lock); 3219 init_mad_qp(port_priv, &port_priv->qp_info[0]); 3220 init_mad_qp(port_priv, &port_priv->qp_info[1]); 3221 3222 cq_size = mad_sendq_size + mad_recvq_size; 3223 has_smi = rdma_cap_ib_smi(device, port_num); 3224 if (has_smi) 3225 cq_size *= 2; 3226 3227 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, 3228 IB_POLL_UNBOUND_WORKQUEUE); 3229 if (IS_ERR(port_priv->cq)) { 3230 dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); 3231 ret = PTR_ERR(port_priv->cq); 3232 goto error3; 3233 } 3234 3235 port_priv->pd = ib_alloc_pd(device, 0); 3236 if (IS_ERR(port_priv->pd)) { 3237 dev_err(&device->dev, "Couldn't create ib_mad PD\n"); 3238 ret = PTR_ERR(port_priv->pd); 3239 goto error4; 3240 } 3241 3242 if (has_smi) { 3243 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); 3244 if (ret) 3245 goto error6; 3246 } 3247 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); 3248 if (ret) 3249 goto error7; 3250 3251 snprintf(name, sizeof name, "ib_mad%d", port_num); 3252 port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); 3253 if (!port_priv->wq) { 3254 ret = -ENOMEM; 3255 goto error8; 3256 } 3257 3258 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3259 list_add_tail(&port_priv->port_list, &ib_mad_port_list); 3260 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3261 3262 ret = ib_mad_port_start(port_priv); 3263 if (ret) { 3264 dev_err(&device->dev, "Couldn't start port\n"); 3265 goto error9; 3266 } 3267 3268 return 0; 3269 3270 error9: 3271 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3272 list_del_init(&port_priv->port_list); 3273 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3274 3275 destroy_workqueue(port_priv->wq); 3276 error8: 3277 destroy_mad_qp(&port_priv->qp_info[1]); 3278 error7: 3279 destroy_mad_qp(&port_priv->qp_info[0]); 3280 error6: 3281 ib_dealloc_pd(port_priv->pd); 3282 error4: 3283 ib_free_cq(port_priv->cq); 3284 cleanup_recv_queue(&port_priv->qp_info[1]); 3285 cleanup_recv_queue(&port_priv->qp_info[0]); 3286 error3: 3287 kfree(port_priv); 3288 3289 return ret; 3290 } 3291 3292 /* 3293 * Close the port 3294 * If there are no classes using the port, free the port 3295 * resources (CQ, MR, PD, QP) and remove the port's info structure 3296 */ 3297 static int ib_mad_port_close(struct ib_device *device, int port_num) 3298 { 3299 struct ib_mad_port_private *port_priv; 3300 unsigned long flags; 3301 3302 
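	/*
	 * Unlink the port from the global list first so it can no longer be
	 * looked up, then tear down the workqueue, QPs, PD, CQ and any
	 * still-posted receive buffers.
	 */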
spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3303 port_priv = __ib_get_mad_port(device, port_num); 3304 if (port_priv == NULL) { 3305 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3306 dev_err(&device->dev, "Port %d not found\n", port_num); 3307 return -ENODEV; 3308 } 3309 list_del_init(&port_priv->port_list); 3310 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3311 3312 destroy_workqueue(port_priv->wq); 3313 destroy_mad_qp(&port_priv->qp_info[1]); 3314 destroy_mad_qp(&port_priv->qp_info[0]); 3315 ib_dealloc_pd(port_priv->pd); 3316 ib_free_cq(port_priv->cq); 3317 cleanup_recv_queue(&port_priv->qp_info[1]); 3318 cleanup_recv_queue(&port_priv->qp_info[0]); 3319 /* XXX: Handle deallocation of MAD registration tables */ 3320 3321 kfree(port_priv); 3322 3323 return 0; 3324 } 3325 3326 static void ib_mad_init_device(struct ib_device *device) 3327 { 3328 int start, i; 3329 3330 start = rdma_start_port(device); 3331 3332 for (i = start; i <= rdma_end_port(device); i++) { 3333 if (!rdma_cap_ib_mad(device, i)) 3334 continue; 3335 3336 if (ib_mad_port_open(device, i)) { 3337 dev_err(&device->dev, "Couldn't open port %d\n", i); 3338 goto error; 3339 } 3340 if (ib_agent_port_open(device, i)) { 3341 dev_err(&device->dev, 3342 "Couldn't open port %d for agents\n", i); 3343 goto error_agent; 3344 } 3345 } 3346 return; 3347 3348 error_agent: 3349 if (ib_mad_port_close(device, i)) 3350 dev_err(&device->dev, "Couldn't close port %d\n", i); 3351 3352 error: 3353 while (--i >= start) { 3354 if (!rdma_cap_ib_mad(device, i)) 3355 continue; 3356 3357 if (ib_agent_port_close(device, i)) 3358 dev_err(&device->dev, 3359 "Couldn't close port %d for agents\n", i); 3360 if (ib_mad_port_close(device, i)) 3361 dev_err(&device->dev, "Couldn't close port %d\n", i); 3362 } 3363 } 3364 3365 static void ib_mad_remove_device(struct ib_device *device, void *client_data) 3366 { 3367 unsigned int i; 3368 3369 rdma_for_each_port (device, i) { 3370 if (!rdma_cap_ib_mad(device, i)) 3371 continue; 3372 3373 if (ib_agent_port_close(device, i)) 3374 dev_err(&device->dev, 3375 "Couldn't close port %d for agents\n", i); 3376 if (ib_mad_port_close(device, i)) 3377 dev_err(&device->dev, "Couldn't close port %d\n", i); 3378 } 3379 } 3380 3381 static struct ib_client mad_client = { 3382 .name = "mad", 3383 .add = ib_mad_init_device, 3384 .remove = ib_mad_remove_device 3385 }; 3386 3387 int ib_mad_init(void) 3388 { 3389 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE); 3390 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE); 3391 3392 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE); 3393 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE); 3394 3395 INIT_LIST_HEAD(&ib_mad_port_list); 3396 3397 if (ib_register_client(&mad_client)) { 3398 pr_err("Couldn't register ib_mad client\n"); 3399 return -EINVAL; 3400 } 3401 3402 return 0; 3403 } 3404 3405 void ib_mad_cleanup(void) 3406 { 3407 ib_unregister_client(&mad_client); 3408 } 3409
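/*
 * Usage sketch (illustrative only, not part of this file's code): a client
 * that obtained "agent" from ib_register_mad_agent() and "send_buf" from
 * ib_create_send_mad()/ib_post_send_mad() could use the helpers exported
 * above roughly like this; the function name and the 5 second value are
 * made up for the example:
 *
 *	static void example_extend_or_cancel(struct ib_mad_agent *agent,
 *					     struct ib_mad_send_buf *send_buf,
 *					     bool give_up)
 *	{
 *		if (give_up) {
 *			ib_cancel_mad(agent, send_buf);
 *			return;
 *		}
 *		if (ib_modify_mad(agent, send_buf, 5000))
 *			pr_debug("MAD already completed\n");
 *	}
 *
 * A cancelled send is reported to the agent's send_handler with status
 * IB_WC_WR_FLUSH_ERR; ib_modify_mad() returns -EINVAL once the request has
 * already completed.
 */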